author    Zeno Albisser <zeno.albisser@digia.com>  2013-08-15 21:46:11 +0200
committer Zeno Albisser <zeno.albisser@digia.com>  2013-08-15 21:46:11 +0200
commit    679147eead574d186ebf3069647b4c23e8ccace6 (patch)
tree      fc247a0ac8ff119f7c8550879ebb6d3dd8d1ff69 /chromium/media/base
Initial import.
Diffstat (limited to 'chromium/media/base')
-rw-r--r-- chromium/media/base/android/OWNERS | 2
-rw-r--r-- chromium/media/base/android/demuxer_stream_player_params.cc | 33
-rw-r--r-- chromium/media/base/android/demuxer_stream_player_params.h | 63
-rw-r--r-- chromium/media/base/android/java/src/org/chromium/media/AudioManagerAndroid.java | 168
-rw-r--r-- chromium/media/base/android/java/src/org/chromium/media/MediaCodecBridge.java | 302
-rw-r--r-- chromium/media/base/android/java/src/org/chromium/media/MediaDrmBridge.java | 338
-rw-r--r-- chromium/media/base/android/java/src/org/chromium/media/MediaPlayerBridge.java | 214
-rw-r--r-- chromium/media/base/android/java/src/org/chromium/media/MediaPlayerListener.java | 169
-rw-r--r-- chromium/media/base/android/java/src/org/chromium/media/VideoCapture.java | 429
-rw-r--r-- chromium/media/base/android/java/src/org/chromium/media/WebAudioMediaCodecBridge.java | 186
-rw-r--r-- chromium/media/base/android/media_codec_bridge.cc | 420
-rw-r--r-- chromium/media/base/android/media_codec_bridge.h | 165
-rw-r--r-- chromium/media/base/android/media_codec_bridge_unittest.cc | 256
-rw-r--r-- chromium/media/base/android/media_drm_bridge.cc | 244
-rw-r--r-- chromium/media/base/android/media_drm_bridge.h | 83
-rw-r--r-- chromium/media/base/android/media_jni_registrar.cc | 43
-rw-r--r-- chromium/media/base/android/media_jni_registrar.h | 19
-rw-r--r-- chromium/media/base/android/media_player_android.cc | 97
-rw-r--r-- chromium/media/base/android/media_player_android.h | 141
-rw-r--r-- chromium/media/base/android/media_player_bridge.cc | 441
-rw-r--r-- chromium/media/base/android/media_player_bridge.h | 165
-rw-r--r-- chromium/media/base/android/media_player_listener.cc | 103
-rw-r--r-- chromium/media/base/android/media_player_listener.h | 66
-rw-r--r-- chromium/media/base/android/media_player_manager.h | 143
-rw-r--r-- chromium/media/base/android/media_resource_getter.cc | 11
-rw-r--r-- chromium/media/base/android/media_resource_getter.h | 48
-rw-r--r-- chromium/media/base/android/media_source_player.cc | 953
-rw-r--r-- chromium/media/base/android/media_source_player.h | 313
-rw-r--r-- chromium/media/base/android/media_source_player_unittest.cc | 462
-rw-r--r-- chromium/media/base/android/webaudio_media_codec_bridge.cc | 199
-rw-r--r-- chromium/media/base/android/webaudio_media_codec_bridge.h | 80
-rw-r--r-- chromium/media/base/android/webaudio_media_codec_info.h | 20
-rw-r--r-- chromium/media/base/audio_buffer.cc | 254
-rw-r--r-- chromium/media/base/audio_buffer.h | 138
-rw-r--r-- chromium/media/base/audio_buffer_queue.cc | 163
-rw-r--r-- chromium/media/base/audio_buffer_queue.h | 104
-rw-r--r-- chromium/media/base/audio_buffer_queue_unittest.cc | 467
-rw-r--r-- chromium/media/base/audio_buffer_unittest.cc | 290
-rw-r--r-- chromium/media/base/audio_bus.cc | 331
-rw-r--r-- chromium/media/base/audio_bus.h | 135
-rw-r--r-- chromium/media/base/audio_bus_unittest.cc | 472
-rw-r--r-- chromium/media/base/audio_capturer_source.h | 68
-rw-r--r-- chromium/media/base/audio_converter.cc | 248
-rw-r--r-- chromium/media/base/audio_converter.h | 138
-rw-r--r-- chromium/media/base/audio_converter_unittest.cc | 362
-rw-r--r-- chromium/media/base/audio_decoder.cc | 13
-rw-r--r-- chromium/media/base/audio_decoder.h | 66
-rw-r--r-- chromium/media/base/audio_decoder_config.cc | 98
-rw-r--r-- chromium/media/base/audio_decoder_config.h | 113
-rw-r--r-- chromium/media/base/audio_fifo.cc | 144
-rw-r--r-- chromium/media/base/audio_fifo.h | 68
-rw-r--r-- chromium/media/base/audio_fifo_unittest.cc | 194
-rw-r--r-- chromium/media/base/audio_hardware_config.cc | 80
-rw-r--r-- chromium/media/base/audio_hardware_config.h | 55
-rw-r--r-- chromium/media/base/audio_hardware_config_unittest.cc | 90
-rw-r--r-- chromium/media/base/audio_hash.cc | 53
-rw-r--r-- chromium/media/base/audio_hash.h | 59
-rw-r--r-- chromium/media/base/audio_hash_unittest.cc | 167
-rw-r--r-- chromium/media/base/audio_pull_fifo.cc | 66
-rw-r--r-- chromium/media/base/audio_pull_fifo.h | 60
-rw-r--r-- chromium/media/base/audio_pull_fifo_unittest.cc | 96
-rw-r--r-- chromium/media/base/audio_renderer.cc | 12
-rw-r--r-- chromium/media/base/audio_renderer.h | 92
-rw-r--r-- chromium/media/base/audio_renderer_mixer.cc | 91
-rw-r--r-- chromium/media/base/audio_renderer_mixer.h | 67
-rw-r--r-- chromium/media/base/audio_renderer_mixer_input.cc | 100
-rw-r--r-- chromium/media/base/audio_renderer_mixer_input.h | 78
-rw-r--r-- chromium/media/base/audio_renderer_mixer_input_unittest.cc | 112
-rw-r--r-- chromium/media/base/audio_renderer_mixer_unittest.cc | 468
-rw-r--r-- chromium/media/base/audio_renderer_sink.h | 71
-rw-r--r-- chromium/media/base/audio_splicer.cc | 130
-rw-r--r-- chromium/media/base/audio_splicer.h | 60
-rw-r--r-- chromium/media/base/audio_splicer_unittest.cc | 374
-rw-r--r-- chromium/media/base/audio_timestamp_helper.cc | 75
-rw-r--r-- chromium/media/base/audio_timestamp_helper.h | 71
-rw-r--r-- chromium/media/base/audio_timestamp_helper_unittest.cc | 122
-rw-r--r-- chromium/media/base/bind_to_loop.h | 172
-rw-r--r-- chromium/media/base/bind_to_loop.h.pump | 100
-rw-r--r-- chromium/media/base/bind_to_loop_unittest.cc | 169
-rw-r--r-- chromium/media/base/bit_reader.cc | 81
-rw-r--r-- chromium/media/base/bit_reader.h | 77
-rw-r--r-- chromium/media/base/bit_reader_unittest.cc | 67
-rw-r--r-- chromium/media/base/bitstream_buffer.h | 37
-rw-r--r-- chromium/media/base/buffers.h | 45
-rw-r--r-- chromium/media/base/byte_queue.cc | 84
-rw-r--r-- chromium/media/base/byte_queue.h | 58
-rw-r--r-- chromium/media/base/callback_holder.h | 88
-rw-r--r-- chromium/media/base/callback_holder_unittest.cc | 125
-rw-r--r-- chromium/media/base/channel_layout.cc | 187
-rw-r--r-- chromium/media/base/channel_layout.h | 135
-rw-r--r-- chromium/media/base/channel_mixer.cc | 406
-rw-r--r-- chromium/media/base/channel_mixer.h | 50
-rw-r--r-- chromium/media/base/channel_mixer_unittest.cc | 180
-rw-r--r-- chromium/media/base/clock.cc | 140
-rw-r--r-- chromium/media/base/clock.h | 131
-rw-r--r-- chromium/media/base/clock_unittest.cc | 253
-rw-r--r-- chromium/media/base/container_names.cc | 1671
-rw-r--r-- chromium/media/base/container_names.h | 70
-rw-r--r-- chromium/media/base/container_names_unittest.cc | 220
-rw-r--r-- chromium/media/base/data_buffer.cc | 52
-rw-r--r-- chromium/media/base/data_buffer.h | 113
-rw-r--r-- chromium/media/base/data_buffer_unittest.cc | 124
-rw-r--r-- chromium/media/base/data_source.cc | 30
-rw-r--r-- chromium/media/base/data_source.h | 79
-rw-r--r-- chromium/media/base/decoder_buffer.cc | 87
-rw-r--r-- chromium/media/base/decoder_buffer.h | 154
-rw-r--r-- chromium/media/base/decoder_buffer_queue.cc | 79
-rw-r--r-- chromium/media/base/decoder_buffer_queue.h | 70
-rw-r--r-- chromium/media/base/decoder_buffer_queue_unittest.cc | 138
-rw-r--r-- chromium/media/base/decoder_buffer_unittest.cc | 100
-rw-r--r-- chromium/media/base/decrypt_config.cc | 27
-rw-r--r-- chromium/media/base/decrypt_config.h | 80
-rw-r--r-- chromium/media/base/decryptor.cc | 13
-rw-r--r-- chromium/media/base/decryptor.h | 178
-rw-r--r-- chromium/media/base/demuxer.cc | 31
-rw-r--r-- chromium/media/base/demuxer.h | 74
-rw-r--r-- chromium/media/base/demuxer_stream.cc | 11
-rw-r--r-- chromium/media/base/demuxer_stream.h | 77
-rw-r--r-- chromium/media/base/djb2.cc | 14
-rw-r--r-- chromium/media/base/djb2.h | 41
-rw-r--r-- chromium/media/base/djb2_unittest.cc | 15
-rw-r--r-- chromium/media/base/fake_audio_render_callback.cc | 50
-rw-r--r-- chromium/media/base/fake_audio_render_callback.h | 62
-rw-r--r-- chromium/media/base/filter_collection.cc | 43
-rw-r--r-- chromium/media/base/filter_collection.h | 46
-rw-r--r-- chromium/media/base/gmock_callback_support.h | 107
-rw-r--r-- chromium/media/base/gmock_callback_support_unittest.cc | 84
-rw-r--r-- chromium/media/base/limits.h | 51
-rw-r--r-- chromium/media/base/media.cc | 88
-rw-r--r-- chromium/media/base/media.h | 48
-rw-r--r-- chromium/media/base/media_export.h | 32
-rw-r--r-- chromium/media/base/media_file_checker.cc | 110
-rw-r--r-- chromium/media/base/media_file_checker.h | 41
-rw-r--r-- chromium/media/base/media_file_checker_unittest.cc | 48
-rw-r--r-- chromium/media/base/media_keys.cc | 15
-rw-r--r-- chromium/media/base/media_keys.h | 90
-rw-r--r-- chromium/media/base/media_log.cc | 231
-rw-r--r-- chromium/media/base/media_log.h | 90
-rw-r--r-- chromium/media/base/media_log_event.h | 103
-rw-r--r-- chromium/media/base/media_posix.cc | 67
-rw-r--r-- chromium/media/base/media_stub.cc | 19
-rw-r--r-- chromium/media/base/media_switches.cc | 85
-rw-r--r-- chromium/media/base/media_switches.h | 54
-rw-r--r-- chromium/media/base/media_win.cc | 39
-rw-r--r-- chromium/media/base/mock_audio_renderer_sink.cc | 17
-rw-r--r-- chromium/media/base/mock_audio_renderer_sink.h | 39
-rw-r--r-- chromium/media/base/mock_data_source_host.cc | 13
-rw-r--r-- chromium/media/base/mock_data_source_host.h | 32
-rw-r--r-- chromium/media/base/mock_demuxer_host.cc | 13
-rw-r--r-- chromium/media/base/mock_demuxer_host.h | 36
-rw-r--r-- chromium/media/base/mock_filters.cc | 77
-rw-r--r-- chromium/media/base/mock_filters.h | 199
-rw-r--r-- chromium/media/base/multi_channel_resampler.cc | 111
-rw-r--r-- chromium/media/base/multi_channel_resampler.h | 77
-rw-r--r-- chromium/media/base/multi_channel_resampler_unittest.cc | 139
-rw-r--r-- chromium/media/base/pipeline.cc | 947
-rw-r--r-- chromium/media/base/pipeline.h | 453
-rw-r--r-- chromium/media/base/pipeline_status.cc | 24
-rw-r--r-- chromium/media/base/pipeline_status.h | 65
-rw-r--r-- chromium/media/base/pipeline_unittest.cc | 1179
-rw-r--r-- chromium/media/base/ranges.cc | 15
-rw-r--r-- chromium/media/base/ranges.h | 162
-rw-r--r-- chromium/media/base/ranges_unittest.cc | 151
-rw-r--r-- chromium/media/base/run_all_unittests.cc | 46
-rw-r--r-- chromium/media/base/sample_format.cc | 55
-rw-r--r-- chromium/media/base/sample_format.h | 37
-rw-r--r-- chromium/media/base/scoped_histogram_timer.h | 32
-rw-r--r-- chromium/media/base/scoped_histogram_timer_unittest.cc | 16
-rw-r--r-- chromium/media/base/seekable_buffer.cc | 277
-rw-r--r-- chromium/media/base/seekable_buffer.h | 184
-rw-r--r-- chromium/media/base/seekable_buffer_unittest.cc | 352
-rw-r--r-- chromium/media/base/serial_runner.cc | 90
-rw-r--r-- chromium/media/base/serial_runner.h | 76
-rw-r--r-- chromium/media/base/simd/convert_rgb_to_yuv.h | 78
-rw-r--r-- chromium/media/base/simd/convert_rgb_to_yuv_c.cc | 91
-rw-r--r-- chromium/media/base/simd/convert_rgb_to_yuv_sse2.cc | 397
-rw-r--r-- chromium/media/base/simd/convert_rgb_to_yuv_ssse3.asm | 318
-rw-r--r-- chromium/media/base/simd/convert_rgb_to_yuv_ssse3.cc | 64
-rw-r--r-- chromium/media/base/simd/convert_rgb_to_yuv_ssse3.h | 40
-rw-r--r-- chromium/media/base/simd/convert_rgb_to_yuv_ssse3.inc | 200
-rw-r--r-- chromium/media/base/simd/convert_rgb_to_yuv_unittest.cc | 107
-rw-r--r-- chromium/media/base/simd/convert_yuv_to_rgb.h | 185
-rw-r--r-- chromium/media/base/simd/convert_yuv_to_rgb_c.cc | 257
-rw-r--r-- chromium/media/base/simd/convert_yuv_to_rgb_mmx.asm | 22
-rw-r--r-- chromium/media/base/simd/convert_yuv_to_rgb_mmx.inc | 121
-rw-r--r-- chromium/media/base/simd/convert_yuv_to_rgb_sse.asm | 23
-rw-r--r-- chromium/media/base/simd/convert_yuv_to_rgb_x86.cc | 101
-rw-r--r-- chromium/media/base/simd/convert_yuva_to_argb_mmx.asm | 23
-rw-r--r-- chromium/media/base/simd/convert_yuva_to_argb_mmx.inc | 176
-rw-r--r-- chromium/media/base/simd/empty_register_state_mmx.asm | 24
-rw-r--r-- chromium/media/base/simd/filter_yuv.h | 36
-rw-r--r-- chromium/media/base/simd/filter_yuv_c.cc | 38
-rw-r--r-- chromium/media/base/simd/filter_yuv_mmx.cc | 79
-rw-r--r-- chromium/media/base/simd/filter_yuv_sse2.cc | 72
-rw-r--r-- chromium/media/base/simd/linear_scale_yuv_to_rgb_mmx.asm | 23
-rw-r--r-- chromium/media/base/simd/linear_scale_yuv_to_rgb_mmx.inc | 168
-rw-r--r-- chromium/media/base/simd/linear_scale_yuv_to_rgb_mmx_x64.asm | 149
-rw-r--r-- chromium/media/base/simd/linear_scale_yuv_to_rgb_sse.asm | 23
-rw-r--r-- chromium/media/base/simd/media_export.asm | 48
-rw-r--r-- chromium/media/base/simd/scale_yuv_to_rgb_mmx.asm | 23
-rw-r--r-- chromium/media/base/simd/scale_yuv_to_rgb_mmx.inc | 117
-rw-r--r-- chromium/media/base/simd/scale_yuv_to_rgb_sse.asm | 23
-rw-r--r-- chromium/media/base/simd/scale_yuv_to_rgb_sse2_x64.asm | 110
-rw-r--r-- chromium/media/base/simd/sinc_resampler_sse.cc | 48
-rw-r--r-- chromium/media/base/simd/vector_math_sse.cc | 39
-rw-r--r-- chromium/media/base/simd/xcode_hack.c | 10
-rw-r--r-- chromium/media/base/simd/yuv_to_rgb_table.cc | 316
-rw-r--r-- chromium/media/base/simd/yuv_to_rgb_table.h | 26
-rw-r--r-- chromium/media/base/sinc_resampler.cc | 391
-rw-r--r-- chromium/media/base/sinc_resampler.h | 143
-rw-r--r-- chromium/media/base/sinc_resampler_unittest.cc | 444
-rw-r--r-- chromium/media/base/stream_parser.cc | 13
-rw-r--r-- chromium/media/base/stream_parser.h | 110
-rw-r--r-- chromium/media/base/stream_parser_buffer.cc | 66
-rw-r--r-- chromium/media/base/stream_parser_buffer.h | 50
-rw-r--r-- chromium/media/base/test_data_util.cc | 47
-rw-r--r-- chromium/media/base/test_data_util.h | 32
-rw-r--r-- chromium/media/base/test_helpers.cc | 283
-rw-r--r-- chromium/media/base/test_helpers.h | 148
-rw-r--r-- chromium/media/base/text_track.h | 42
-rw-r--r-- chromium/media/base/vector_math.cc | 110
-rw-r--r-- chromium/media/base/vector_math.h | 32
-rw-r--r-- chromium/media/base/vector_math_testing.h | 35
-rw-r--r-- chromium/media/base/vector_math_unittest.cc | 291
-rw-r--r-- chromium/media/base/video_decoder.cc | 25
-rw-r--r-- chromium/media/base/video_decoder.h | 96
-rw-r--r-- chromium/media/base/video_decoder_config.cc | 168
-rw-r--r-- chromium/media/base/video_decoder_config.h | 158
-rw-r--r-- chromium/media/base/video_frame.cc | 461
-rw-r--r-- chromium/media/base/video_frame.h | 294
-rw-r--r-- chromium/media/base/video_frame_unittest.cc | 425
-rw-r--r-- chromium/media/base/video_renderer.cc | 12
-rw-r--r-- chromium/media/base/video_renderer.h | 97
-rw-r--r-- chromium/media/base/video_util.cc | 308
-rw-r--r-- chromium/media/base/video_util.h | 89
-rw-r--r-- chromium/media/base/video_util_unittest.cc | 391
-rw-r--r-- chromium/media/base/yuv_convert.cc | 654
-rw-r--r-- chromium/media/base/yuv_convert.h | 157
-rw-r--r-- chromium/media/base/yuv_convert_unittest.cc | 970
239 files changed, 35658 insertions, 0 deletions
diff --git a/chromium/media/base/android/OWNERS b/chromium/media/base/android/OWNERS
new file mode 100644
index 00000000000..b896e286436
--- /dev/null
+++ b/chromium/media/base/android/OWNERS
@@ -0,0 +1,2 @@
+bulach@chromium.org
+qinmin@chromium.org
diff --git a/chromium/media/base/android/demuxer_stream_player_params.cc b/chromium/media/base/android/demuxer_stream_player_params.cc
new file mode 100644
index 00000000000..827be119565
--- /dev/null
+++ b/chromium/media/base/android/demuxer_stream_player_params.cc
@@ -0,0 +1,33 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/android/demuxer_stream_player_params.h"
+
+namespace media {
+
+MediaPlayerHostMsg_DemuxerReady_Params::
+ MediaPlayerHostMsg_DemuxerReady_Params()
+ : audio_codec(kUnknownAudioCodec),
+ audio_channels(0),
+ audio_sampling_rate(0),
+ is_audio_encrypted(false),
+ video_codec(kUnknownVideoCodec),
+ is_video_encrypted(false),
+ duration_ms(0) {}
+
+MediaPlayerHostMsg_DemuxerReady_Params::
+ ~MediaPlayerHostMsg_DemuxerReady_Params() {}
+
+AccessUnit::AccessUnit() : end_of_stream(false) {}
+
+AccessUnit::~AccessUnit() {}
+
+MediaPlayerHostMsg_ReadFromDemuxerAck_Params::
+ MediaPlayerHostMsg_ReadFromDemuxerAck_Params()
+ : type(DemuxerStream::UNKNOWN) {}
+
+MediaPlayerHostMsg_ReadFromDemuxerAck_Params::
+ ~MediaPlayerHostMsg_ReadFromDemuxerAck_Params() {}
+
+} // namespace media
diff --git a/chromium/media/base/android/demuxer_stream_player_params.h b/chromium/media/base/android/demuxer_stream_player_params.h
new file mode 100644
index 00000000000..a9fb0520ae5
--- /dev/null
+++ b/chromium/media/base/android/demuxer_stream_player_params.h
@@ -0,0 +1,63 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_ANDROID_DEMUXER_STREAM_PLAYER_PARAMS_H_
+#define MEDIA_BASE_ANDROID_DEMUXER_STREAM_PLAYER_PARAMS_H_
+
+#include <string>
+#include <vector>
+
+#include "media/base/audio_decoder_config.h"
+#include "media/base/decrypt_config.h"
+#include "media/base/demuxer_stream.h"
+#include "media/base/media_export.h"
+#include "media/base/video_decoder_config.h"
+#include "ui/gfx/size.h"
+
+namespace media {
+
+struct MEDIA_EXPORT MediaPlayerHostMsg_DemuxerReady_Params {
+ MediaPlayerHostMsg_DemuxerReady_Params();
+ ~MediaPlayerHostMsg_DemuxerReady_Params();
+
+ AudioCodec audio_codec;
+ int audio_channels;
+ int audio_sampling_rate;
+ bool is_audio_encrypted;
+ std::vector<uint8> audio_extra_data;
+
+ VideoCodec video_codec;
+ gfx::Size video_size;
+ bool is_video_encrypted;
+ std::vector<uint8> video_extra_data;
+
+ int duration_ms;
+ std::string key_system;
+};
+
+struct MEDIA_EXPORT AccessUnit {
+ AccessUnit();
+ ~AccessUnit();
+
+ DemuxerStream::Status status;
+ bool end_of_stream;
+ // TODO(ycheo): Use the shared memory to transfer the block data.
+ std::vector<uint8> data;
+ base::TimeDelta timestamp;
+ std::vector<char> key_id;
+ std::vector<char> iv;
+ std::vector<media::SubsampleEntry> subsamples;
+};
+
+struct MEDIA_EXPORT MediaPlayerHostMsg_ReadFromDemuxerAck_Params {
+ MediaPlayerHostMsg_ReadFromDemuxerAck_Params();
+ ~MediaPlayerHostMsg_ReadFromDemuxerAck_Params();
+
+ DemuxerStream::Type type;
+ std::vector<AccessUnit> access_units;
+};
+
+}  // namespace media
+
+#endif // MEDIA_BASE_ANDROID_DEMUXER_STREAM_PLAYER_PARAMS_H_
diff --git a/chromium/media/base/android/java/src/org/chromium/media/AudioManagerAndroid.java b/chromium/media/base/android/java/src/org/chromium/media/AudioManagerAndroid.java
new file mode 100644
index 00000000000..a7afdae59c6
--- /dev/null
+++ b/chromium/media/base/android/java/src/org/chromium/media/AudioManagerAndroid.java
@@ -0,0 +1,168 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.media;
+
+import android.content.BroadcastReceiver;
+import android.content.Context;
+import android.content.Intent;
+import android.content.IntentFilter;
+import android.content.pm.PackageManager;
+import android.media.AudioFormat;
+import android.media.AudioManager;
+import android.media.AudioRecord;
+import android.media.AudioTrack;
+import android.os.Build;
+import android.util.Log;
+
+import org.chromium.base.CalledByNative;
+import org.chromium.base.JNINamespace;
+
+@JNINamespace("media")
+class AudioManagerAndroid {
+ private static final String TAG = "AudioManagerAndroid";
+
+    // Most Google flagship devices use 44.1 kHz as the default sampling rate;
+    // 44.1 kHz is also widely used on other Android devices.
+    private static final int DEFAULT_SAMPLING_RATE = 44100;
+    // Arbitrarily chosen frame size, close to the value returned on a Nexus 4.
+    // Return this default value when
+    // getProperty(PROPERTY_OUTPUT_FRAMES_PER_BUFFER) fails.
+ private static final int DEFAULT_FRAME_PER_BUFFER = 256;
+
+ private final AudioManager mAudioManager;
+ private final Context mContext;
+
+ private BroadcastReceiver mReceiver;
+ private boolean mOriginalSpeakerStatus;
+
+ @CalledByNative
+ public void setMode(int mode) {
+ try {
+ mAudioManager.setMode(mode);
+ if (mode == AudioManager.MODE_IN_COMMUNICATION) {
+ mAudioManager.setSpeakerphoneOn(true);
+ }
+ } catch (SecurityException e) {
+ Log.e(TAG, "setMode exception: " + e.getMessage());
+ logDeviceInfo();
+ }
+ }
+
+ @CalledByNative
+ private static AudioManagerAndroid createAudioManagerAndroid(Context context) {
+ return new AudioManagerAndroid(context);
+ }
+
+ private AudioManagerAndroid(Context context) {
+ mContext = context;
+ mAudioManager = (AudioManager)mContext.getSystemService(Context.AUDIO_SERVICE);
+ }
+
+ @CalledByNative
+ public void registerHeadsetReceiver() {
+ if (mReceiver != null) {
+ return;
+ }
+
+ mOriginalSpeakerStatus = mAudioManager.isSpeakerphoneOn();
+ IntentFilter filter = new IntentFilter(Intent.ACTION_HEADSET_PLUG);
+
+ mReceiver = new BroadcastReceiver() {
+ @Override
+ public void onReceive(Context context, Intent intent) {
+ if (Intent.ACTION_HEADSET_PLUG.equals(intent.getAction())) {
+ try {
+ mAudioManager.setSpeakerphoneOn(
+ intent.getIntExtra("state", 0) == 0);
+ } catch (SecurityException e) {
+                        Log.e(TAG, "setSpeakerphoneOn exception: " + e.getMessage());
+ logDeviceInfo();
+ }
+ }
+ }
+ };
+ mContext.registerReceiver(mReceiver, filter);
+ }
+
+ @CalledByNative
+ public void unregisterHeadsetReceiver() {
+ mContext.unregisterReceiver(mReceiver);
+ mReceiver = null;
+ mAudioManager.setSpeakerphoneOn(mOriginalSpeakerStatus);
+ }
+
+ private void logDeviceInfo() {
+ Log.i(TAG, "Manufacturer:" + Build.MANUFACTURER +
+ " Board: " + Build.BOARD + " Device: " + Build.DEVICE +
+ " Model: " + Build.MODEL + " PRODUCT: " + Build.PRODUCT);
+ }
+
+ @CalledByNative
+ private int getNativeOutputSampleRate() {
+ if (android.os.Build.VERSION.SDK_INT >= android.os.Build.VERSION_CODES.JELLY_BEAN_MR1) {
+ String sampleRateString = mAudioManager.getProperty(
+ AudioManager.PROPERTY_OUTPUT_SAMPLE_RATE);
+ return (sampleRateString == null ?
+ DEFAULT_SAMPLING_RATE : Integer.parseInt(sampleRateString));
+ } else {
+ return DEFAULT_SAMPLING_RATE;
+ }
+ }
+
+ /**
+ * Returns the minimum frame size required for audio input.
+ *
+ * @param sampleRate sampling rate
+ * @param channels number of channels
+ */
+ @CalledByNative
+ private static int getMinInputFrameSize(int sampleRate, int channels) {
+ int channelConfig;
+ if (channels == 1) {
+ channelConfig = AudioFormat.CHANNEL_IN_MONO;
+ } else if (channels == 2) {
+ channelConfig = AudioFormat.CHANNEL_IN_STEREO;
+ } else {
+ return -1;
+ }
+ return AudioRecord.getMinBufferSize(
+ sampleRate, channelConfig, AudioFormat.ENCODING_PCM_16BIT) / 2 / channels;
+ }
+
+ /**
+ * Returns the minimum frame size required for audio output.
+ *
+ * @param sampleRate sampling rate
+ * @param channels number of channels
+ */
+ @CalledByNative
+ private static int getMinOutputFrameSize(int sampleRate, int channels) {
+ int channelConfig;
+ if (channels == 1) {
+ channelConfig = AudioFormat.CHANNEL_OUT_MONO;
+ } else if (channels == 2) {
+ channelConfig = AudioFormat.CHANNEL_OUT_STEREO;
+ } else {
+ return -1;
+ }
+ return AudioTrack.getMinBufferSize(
+ sampleRate, channelConfig, AudioFormat.ENCODING_PCM_16BIT) / 2 / channels;
+ }
+
+ @CalledByNative
+ private boolean isAudioLowLatencySupported() {
+ return mContext.getPackageManager().hasSystemFeature(
+ PackageManager.FEATURE_AUDIO_LOW_LATENCY);
+ }
+
+ @CalledByNative
+ private int getAudioLowLatencyOutputFrameSize() {
+ String framesPerBuffer =
+ mAudioManager.getProperty(AudioManager.PROPERTY_OUTPUT_FRAMES_PER_BUFFER);
+ return (framesPerBuffer == null ?
+ DEFAULT_FRAME_PER_BUFFER : Integer.parseInt(framesPerBuffer));
+ }
+
+}
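
Both getMin*FrameSize() helpers above convert the byte-denominated minimum
buffer size reported by AudioRecord/AudioTrack into frames: divide by the two
bytes per 16-bit PCM sample, then by the channel count. A minimal sketch with
a hypothetical buffer size (the real value is device-dependent and would come
from AudioTrack.getMinBufferSize()):

    // Illustrative only: mirrors the bytes-to-frames conversion in
    // getMinOutputFrameSize(). The buffer size below is a made-up value.
    class FrameSizeExample {
        public static void main(String[] args) {
            int minBufferSizeBytes = 15360; // hypothetical device value
            int bytesPerSample = 2;         // ENCODING_PCM_16BIT
            int channels = 2;               // CHANNEL_OUT_STEREO
            int frames = minBufferSizeBytes / bytesPerSample / channels;
            System.out.println(frames);     // prints 3840
        }
    }
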
diff --git a/chromium/media/base/android/java/src/org/chromium/media/MediaCodecBridge.java b/chromium/media/base/android/java/src/org/chromium/media/MediaCodecBridge.java
new file mode 100644
index 00000000000..ed5d9478c55
--- /dev/null
+++ b/chromium/media/base/android/java/src/org/chromium/media/MediaCodecBridge.java
@@ -0,0 +1,302 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.media;
+
+import android.media.AudioFormat;
+import android.media.AudioManager;
+import android.media.AudioTrack;
+import android.media.MediaCodec;
+import android.media.MediaCrypto;
+import android.media.MediaFormat;
+import android.util.Log;
+import android.view.Surface;
+
+import java.nio.ByteBuffer;
+
+import org.chromium.base.CalledByNative;
+import org.chromium.base.JNINamespace;
+
+/**
+ * A wrapper of the MediaCodec class to facilitate exception capturing and
+ * audio rendering.
+ */
+@JNINamespace("media")
+class MediaCodecBridge {
+
+ private static final String TAG = "MediaCodecBridge";
+
+ // Error code for MediaCodecBridge. Keep this value in sync with
+ // INFO_MEDIA_CODEC_ERROR in media_codec_bridge.h.
+ private static final int MEDIA_CODEC_ERROR = -1000;
+
+ // After a flush(), dequeueOutputBuffer() can often produce empty presentation timestamps
+ // for several frames. As a result, the player may find that the time does not increase
+ // after decoding a frame. To detect this, we check whether the presentation timestamp from
+ // dequeueOutputBuffer() is larger than input_timestamp - MAX_PRESENTATION_TIMESTAMP_SHIFT_US
+    // after a flush, and we force the presentation timestamps from dequeueOutputBuffer()
+    // to be non-decreasing for the remaining frames.
+ private static final long MAX_PRESENTATION_TIMESTAMP_SHIFT_US = 100000;
+
+ private ByteBuffer[] mInputBuffers;
+ private ByteBuffer[] mOutputBuffers;
+
+ private MediaCodec mMediaCodec;
+ private AudioTrack mAudioTrack;
+ private boolean mFlushed;
+ private long mLastPresentationTimeUs;
+
+ private static class DequeueOutputResult {
+ private final int mIndex;
+ private final int mFlags;
+ private final int mOffset;
+ private final long mPresentationTimeMicroseconds;
+ private final int mNumBytes;
+
+ private DequeueOutputResult(int index, int flags, int offset,
+ long presentationTimeMicroseconds, int numBytes) {
+ mIndex = index;
+ mFlags = flags;
+ mOffset = offset;
+ mPresentationTimeMicroseconds = presentationTimeMicroseconds;
+ mNumBytes = numBytes;
+ }
+
+ @CalledByNative("DequeueOutputResult")
+ private int index() { return mIndex; }
+
+ @CalledByNative("DequeueOutputResult")
+ private int flags() { return mFlags; }
+
+ @CalledByNative("DequeueOutputResult")
+ private int offset() { return mOffset; }
+
+ @CalledByNative("DequeueOutputResult")
+ private long presentationTimeMicroseconds() { return mPresentationTimeMicroseconds; }
+
+ @CalledByNative("DequeueOutputResult")
+ private int numBytes() { return mNumBytes; }
+ }
+
+ private MediaCodecBridge(String mime) {
+ mMediaCodec = MediaCodec.createDecoderByType(mime);
+ mLastPresentationTimeUs = 0;
+ mFlushed = true;
+ }
+
+ @CalledByNative
+ private static MediaCodecBridge create(String mime) {
+ return new MediaCodecBridge(mime);
+ }
+
+ @CalledByNative
+ private void release() {
+ mMediaCodec.release();
+ if (mAudioTrack != null) {
+ mAudioTrack.release();
+ }
+ }
+
+ @CalledByNative
+ private void start() {
+ mMediaCodec.start();
+ mInputBuffers = mMediaCodec.getInputBuffers();
+ }
+
+ @CalledByNative
+ private int dequeueInputBuffer(long timeoutUs) {
+ try {
+ return mMediaCodec.dequeueInputBuffer(timeoutUs);
+        } catch (Exception e) {
+ Log.e(TAG, "Cannot dequeue Input buffer " + e.toString());
+ }
+ return MEDIA_CODEC_ERROR;
+ }
+
+ @CalledByNative
+ private void flush() {
+ mMediaCodec.flush();
+ mFlushed = true;
+ if (mAudioTrack != null) {
+ mAudioTrack.flush();
+ }
+ }
+
+ @CalledByNative
+ private void stop() {
+ mMediaCodec.stop();
+ if (mAudioTrack != null) {
+ mAudioTrack.pause();
+ }
+ }
+
+ @CalledByNative
+ private int getOutputHeight() {
+ return mMediaCodec.getOutputFormat().getInteger(MediaFormat.KEY_HEIGHT);
+ }
+
+ @CalledByNative
+ private int getOutputWidth() {
+ return mMediaCodec.getOutputFormat().getInteger(MediaFormat.KEY_WIDTH);
+ }
+
+ @CalledByNative
+ private ByteBuffer getInputBuffer(int index) {
+ return mInputBuffers[index];
+ }
+
+ @CalledByNative
+ private ByteBuffer getOutputBuffer(int index) {
+ return mOutputBuffers[index];
+ }
+
+ @CalledByNative
+ private void queueInputBuffer(
+ int index, int offset, int size, long presentationTimeUs, int flags) {
+ resetLastPresentationTimeIfNeeded(presentationTimeUs);
+ try {
+ mMediaCodec.queueInputBuffer(index, offset, size, presentationTimeUs, flags);
+        } catch (IllegalStateException e) {
+ Log.e(TAG, "Failed to queue input buffer " + e.toString());
+ }
+ }
+
+ @CalledByNative
+ private void queueSecureInputBuffer(
+ int index, int offset, byte[] iv, byte[] keyId, int[] numBytesOfClearData,
+ int[] numBytesOfEncryptedData, int numSubSamples, long presentationTimeUs) {
+ resetLastPresentationTimeIfNeeded(presentationTimeUs);
+ try {
+ MediaCodec.CryptoInfo cryptoInfo = new MediaCodec.CryptoInfo();
+ cryptoInfo.set(numSubSamples, numBytesOfClearData, numBytesOfEncryptedData,
+ keyId, iv, MediaCodec.CRYPTO_MODE_AES_CTR);
+ mMediaCodec.queueSecureInputBuffer(index, offset, cryptoInfo, presentationTimeUs, 0);
+        } catch (IllegalStateException e) {
+ Log.e(TAG, "Failed to queue secure input buffer " + e.toString());
+ }
+ }
+
+ @CalledByNative
+ private void releaseOutputBuffer(int index, boolean render) {
+ mMediaCodec.releaseOutputBuffer(index, render);
+ }
+
+ @CalledByNative
+ private void getOutputBuffers() {
+ mOutputBuffers = mMediaCodec.getOutputBuffers();
+ }
+
+ @CalledByNative
+ private DequeueOutputResult dequeueOutputBuffer(long timeoutUs) {
+ MediaCodec.BufferInfo info = new MediaCodec.BufferInfo();
+ int index = MEDIA_CODEC_ERROR;
+ try {
+ index = mMediaCodec.dequeueOutputBuffer(info, timeoutUs);
+ if (info.presentationTimeUs < mLastPresentationTimeUs) {
+ // TODO(qinmin): return a special code through DequeueOutputResult
+                // to notify the native code that the frame has a wrong presentation
+ // timestamp and should be skipped.
+ info.presentationTimeUs = mLastPresentationTimeUs;
+ }
+ mLastPresentationTimeUs = info.presentationTimeUs;
+ } catch (IllegalStateException e) {
+ Log.e(TAG, "Cannot dequeue output buffer " + e.toString());
+ }
+ return new DequeueOutputResult(
+ index, info.flags, info.offset, info.presentationTimeUs, info.size);
+ }
+
+ @CalledByNative
+ private boolean configureVideo(MediaFormat format, Surface surface, MediaCrypto crypto,
+ int flags) {
+ try {
+ mMediaCodec.configure(format, surface, crypto, flags);
+ return true;
+ } catch (IllegalStateException e) {
+ Log.e(TAG, "Cannot configure the video codec " + e.toString());
+ }
+ return false;
+ }
+
+ @CalledByNative
+    private static MediaFormat createAudioFormat(String mime, int sampleRate, int channelCount) {
+        return MediaFormat.createAudioFormat(mime, sampleRate, channelCount);
+ }
+
+ @CalledByNative
+ private static MediaFormat createVideoFormat(String mime, int width, int height) {
+ return MediaFormat.createVideoFormat(mime, width, height);
+ }
+
+ @CalledByNative
+ private static void setCodecSpecificData(MediaFormat format, int index, ByteBuffer bytes) {
+ String name = null;
+ if (index == 0) {
+ name = "csd-0";
+ } else if (index == 1) {
+ name = "csd-1";
+ }
+ if (name != null) {
+ format.setByteBuffer(name, bytes);
+ }
+ }
+
+ @CalledByNative
+ private static void setFrameHasADTSHeader(MediaFormat format) {
+ format.setInteger(MediaFormat.KEY_IS_ADTS, 1);
+ }
+
+ @CalledByNative
+ private boolean configureAudio(MediaFormat format, MediaCrypto crypto, int flags,
+ boolean playAudio) {
+ try {
+ mMediaCodec.configure(format, null, crypto, flags);
+ if (playAudio) {
+ int sampleRate = format.getInteger(MediaFormat.KEY_SAMPLE_RATE);
+ int channelCount = format.getInteger(MediaFormat.KEY_CHANNEL_COUNT);
+ int channelConfig = (channelCount == 1) ? AudioFormat.CHANNEL_OUT_MONO :
+ AudioFormat.CHANNEL_OUT_STEREO;
+ // Using 16bit PCM for output. Keep this value in sync with
+ // kBytesPerAudioOutputSample in media_codec_bridge.cc.
+ int minBufferSize = AudioTrack.getMinBufferSize(sampleRate, channelConfig,
+ AudioFormat.ENCODING_PCM_16BIT);
+ mAudioTrack = new AudioTrack(AudioManager.STREAM_MUSIC, sampleRate, channelConfig,
+ AudioFormat.ENCODING_PCM_16BIT, minBufferSize, AudioTrack.MODE_STREAM);
+ }
+ return true;
+ } catch (IllegalStateException e) {
+ Log.e(TAG, "Cannot configure the audio codec " + e.toString());
+ }
+ return false;
+ }
+
+ @CalledByNative
+ private void playOutputBuffer(byte[] buf) {
+ if (mAudioTrack != null) {
+ if (AudioTrack.PLAYSTATE_PLAYING != mAudioTrack.getPlayState()) {
+ mAudioTrack.play();
+ }
+ int size = mAudioTrack.write(buf, 0, buf.length);
+ if (buf.length != size) {
+ Log.i(TAG, "Failed to send all data to audio output, expected size: " +
+ buf.length + ", actual size: " + size);
+ }
+ }
+ }
+
+ @CalledByNative
+ private void setVolume(double volume) {
+ if (mAudioTrack != null) {
+ mAudioTrack.setStereoVolume((float) volume, (float) volume);
+ }
+ }
+
+ private void resetLastPresentationTimeIfNeeded(long presentationTimeUs) {
+ if (mFlushed) {
+ mLastPresentationTimeUs =
+ Math.max(presentationTimeUs - MAX_PRESENTATION_TIMESTAMP_SHIFT_US, 0);
+ mFlushed = false;
+ }
+ }
+}
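
The flush handling above amounts to a small monotonic-clamp state machine:
after a flush the floor is rebased to
input_timestamp - MAX_PRESENTATION_TIMESTAMP_SHIFT_US, and every output
timestamp below the floor is raised to it. A minimal sketch, detached from
MediaCodec so it runs on any JVM (all timestamp values are made up):

    // Sketch of the post-flush timestamp clamp used by MediaCodecBridge.
    class TimestampClamp {
        private static final long MAX_PRESENTATION_TIMESTAMP_SHIFT_US = 100000;
        private long mLastPresentationTimeUs;
        private boolean mFlushed = true;

        // Mirrors resetLastPresentationTimeIfNeeded(): rebase the floor on
        // the first input buffer queued after a flush.
        void onQueueInput(long presentationTimeUs) {
            if (mFlushed) {
                mLastPresentationTimeUs = Math.max(
                        presentationTimeUs - MAX_PRESENTATION_TIMESTAMP_SHIFT_US, 0);
                mFlushed = false;
            }
        }

        // Mirrors the clamp in dequeueOutputBuffer(): reported timestamps
        // never go backwards.
        long onDequeueOutput(long presentationTimeUs) {
            if (presentationTimeUs < mLastPresentationTimeUs) {
                presentationTimeUs = mLastPresentationTimeUs;
            }
            mLastPresentationTimeUs = presentationTimeUs;
            return presentationTimeUs;
        }

        public static void main(String[] args) {
            TimestampClamp clamp = new TimestampClamp();
            clamp.onQueueInput(500000);                        // first input after a flush
            System.out.println(clamp.onDequeueOutput(0));      // 400000 (clamped up)
            System.out.println(clamp.onDequeueOutput(520000)); // 520000 (passes through)
        }
    }
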
diff --git a/chromium/media/base/android/java/src/org/chromium/media/MediaDrmBridge.java b/chromium/media/base/android/java/src/org/chromium/media/MediaDrmBridge.java
new file mode 100644
index 00000000000..5f824fc6128
--- /dev/null
+++ b/chromium/media/base/android/java/src/org/chromium/media/MediaDrmBridge.java
@@ -0,0 +1,338 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.media;
+
+import android.media.MediaCrypto;
+import android.media.MediaDrm;
+import android.os.AsyncTask;
+import android.os.Handler;
+import android.util.Log;
+
+import org.apache.http.HttpResponse;
+import org.apache.http.client.ClientProtocolException;
+import org.apache.http.client.HttpClient;
+import org.apache.http.client.methods.HttpPost;
+import org.apache.http.impl.client.DefaultHttpClient;
+import org.apache.http.util.EntityUtils;
+import org.chromium.base.CalledByNative;
+import org.chromium.base.JNINamespace;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.UUID;
+
+/**
+ * A wrapper of the Android MediaDrm class. Each MediaDrmBridge manages multiple
+ * sessions for a single MediaSourcePlayer.
+ */
+@JNINamespace("media")
+class MediaDrmBridge {
+
+ private static final String TAG = "MediaDrmBridge";
+ private MediaDrm mMediaDrm;
+ private UUID mSchemeUUID;
+ private int mNativeMediaDrmBridge;
+ // TODO(qinmin): we currently only support one session per DRM bridge.
+ // Change this to a HashMap if we start to support multiple sessions.
+ private String mSessionId;
+ private MediaCrypto mMediaCrypto;
+ private String mMimeType;
+    private Handler mHandler;
+
+ private static UUID getUUIDFromBytes(byte[] data) {
+ if (data.length != 16) {
+ return null;
+ }
+ long mostSigBits = 0;
+ long leastSigBits = 0;
+ for (int i = 0; i < 8; i++) {
+ mostSigBits = (mostSigBits << 8) | (data[i] & 0xff);
+ }
+ for (int i = 8; i < 16; i++) {
+ leastSigBits = (leastSigBits << 8) | (data[i] & 0xff);
+ }
+ return new UUID(mostSigBits, leastSigBits);
+ }
+
+ private MediaDrmBridge(UUID schemeUUID, int nativeMediaDrmBridge) {
+ try {
+ mSchemeUUID = schemeUUID;
+ mMediaDrm = new MediaDrm(schemeUUID);
+ mNativeMediaDrmBridge = nativeMediaDrmBridge;
+ mMediaDrm.setOnEventListener(new MediaDrmListener());
+ mSessionId = openSession();
+            mHandler = new Handler();
+ } catch (android.media.UnsupportedSchemeException e) {
+ Log.e(TAG, "Unsupported DRM scheme " + e.toString());
+ }
+ }
+
+ /**
+ * Open a new session and return the sessionId.
+ *
+ * @return ID of the session.
+ */
+ private String openSession() {
+ String session = null;
+ try {
+ final byte[] sessionId = mMediaDrm.openSession();
+ session = new String(sessionId, "UTF-8");
+ } catch (android.media.NotProvisionedException e) {
+ Log.e(TAG, "Cannot open a new session " + e.toString());
+ } catch (java.io.UnsupportedEncodingException e) {
+ Log.e(TAG, "Cannot open a new session " + e.toString());
+ }
+ return session;
+ }
+
+ /**
+ * Create a new MediaDrmBridge from the crypto scheme UUID.
+ *
+ * @param schemeUUID Crypto scheme UUID.
+ * @param nativeMediaDrmBridge Native object of this class.
+ */
+ @CalledByNative
+ private static MediaDrmBridge create(byte[] schemeUUID, int nativeMediaDrmBridge) {
+ UUID cryptoScheme = getUUIDFromBytes(schemeUUID);
+ if (cryptoScheme != null && MediaDrm.isCryptoSchemeSupported(cryptoScheme)) {
+ return new MediaDrmBridge(cryptoScheme, nativeMediaDrmBridge);
+ }
+ return null;
+ }
+
+    /**
+     * Create a MediaCrypto object for the current session Id, or return the
+     * existing one if it has already been created.
+     */
+ @CalledByNative
+ private MediaCrypto getMediaCrypto() {
+ if (mMediaCrypto != null) {
+ return mMediaCrypto;
+ }
+ try {
+ final byte[] session = mSessionId.getBytes("UTF-8");
+ if (MediaCrypto.isCryptoSchemeSupported(mSchemeUUID)) {
+ mMediaCrypto = new MediaCrypto(mSchemeUUID, session);
+ }
+ } catch (android.media.MediaCryptoException e) {
+ Log.e(TAG, "Cannot create MediaCrypto " + e.toString());
+ } catch (java.io.UnsupportedEncodingException e) {
+ Log.e(TAG, "Cannot create MediaCrypto " + e.toString());
+ }
+ return mMediaCrypto;
+ }
+
+ /**
+ * Release the MediaDrmBridge object.
+ */
+ @CalledByNative
+ private void release() {
+ if (mMediaCrypto != null) {
+ mMediaCrypto.release();
+ }
+ if (mSessionId != null) {
+ try {
+ final byte[] session = mSessionId.getBytes("UTF-8");
+ mMediaDrm.closeSession(session);
+ } catch (java.io.UnsupportedEncodingException e) {
+ Log.e(TAG, "Failed to close session " + e.toString());
+ }
+ }
+ mMediaDrm.release();
+ }
+
+ /**
+ * Generate a key request and post an asynchronous task to the native side
+ * with the response message.
+ *
+ * @param initData Data needed to generate the key request.
+ * @param mime Mime type.
+ */
+ @CalledByNative
+ private void generateKeyRequest(byte[] initData, String mime) {
+ if (mSessionId == null) {
+ return;
+ }
+ try {
+ final byte[] session = mSessionId.getBytes("UTF-8");
+ mMimeType = mime;
+ HashMap<String, String> optionalParameters = new HashMap<String, String>();
+ final MediaDrm.KeyRequest request = mMediaDrm.getKeyRequest(
+ session, initData, mime, MediaDrm.KEY_TYPE_STREAMING, optionalParameters);
+            mHandler.post(new Runnable() {
+ public void run() {
+ nativeOnKeyMessage(mNativeMediaDrmBridge, mSessionId,
+ request.getData(), request.getDefaultUrl());
+ }
+ });
+ return;
+ } catch (android.media.NotProvisionedException e) {
+ Log.e(TAG, "Cannot get key request " + e.toString());
+ } catch (java.io.UnsupportedEncodingException e) {
+ Log.e(TAG, "Cannot get key request " + e.toString());
+ }
+ onKeyError();
+ }
+
+ /**
+ * Cancel a key request for a session Id.
+ *
+ * @param sessionId Crypto session Id.
+ */
+ @CalledByNative
+ private void cancelKeyRequest(String sessionId) {
+ if (mSessionId == null || !mSessionId.equals(sessionId)) {
+ return;
+ }
+ try {
+ final byte[] session = sessionId.getBytes("UTF-8");
+ mMediaDrm.removeKeys(session);
+ } catch (java.io.UnsupportedEncodingException e) {
+ Log.e(TAG, "Cannot cancel key request " + e.toString());
+ }
+ }
+
+ /**
+ * Add a key for a session Id.
+ *
+ * @param sessionId Crypto session Id.
+ * @param key Response data from the server.
+ */
+ @CalledByNative
+ private void addKey(String sessionId, byte[] key) {
+ if (mSessionId == null || !mSessionId.equals(sessionId)) {
+ return;
+ }
+ try {
+ final byte[] session = sessionId.getBytes("UTF-8");
+ mMediaDrm.provideKeyResponse(session, key);
+            mHandler.post(new Runnable() {
+ public void run() {
+ nativeOnKeyAdded(mNativeMediaDrmBridge, mSessionId);
+ }
+ });
+ return;
+ } catch (android.media.NotProvisionedException e) {
+ Log.e(TAG, "failed to provide key response " + e.toString());
+ } catch (android.media.DeniedByServerException e) {
+ Log.e(TAG, "failed to provide key response " + e.toString());
+ } catch (java.io.UnsupportedEncodingException e) {
+ Log.e(TAG, "failed to provide key response " + e.toString());
+ }
+ onKeyError();
+ }
+
+ /**
+ * Called when the provision response is received.
+ *
+ * @param response Response data from the provision server.
+ */
+ private void onProvisionResponse(byte[] response) {
+ try {
+ mMediaDrm.provideProvisionResponse(response);
+ } catch (android.media.DeniedByServerException e) {
+ Log.e(TAG, "failed to provide key response " + e.toString());
+ }
+ }
+
+ private void onKeyError() {
+ // TODO(qinmin): pass the error code to native.
+        mHandler.post(new Runnable() {
+ public void run() {
+ nativeOnKeyError(mNativeMediaDrmBridge, mSessionId);
+ }
+ });
+ }
+
+ private class MediaDrmListener implements MediaDrm.OnEventListener {
+ @Override
+ public void onEvent(MediaDrm mediaDrm, byte[] sessionId, int event, int extra,
+ byte[] data) {
+            switch (event) {
+ case MediaDrm.EVENT_PROVISION_REQUIRED:
+ MediaDrm.ProvisionRequest request = mMediaDrm.getProvisionRequest();
+ PostRequestTask postTask = new PostRequestTask(request.getData());
+ postTask.execute(request.getDefaultUrl());
+ break;
+ case MediaDrm.EVENT_KEY_REQUIRED:
+ generateKeyRequest(data, mMimeType);
+ break;
+ case MediaDrm.EVENT_KEY_EXPIRED:
+ onKeyError();
+ break;
+ case MediaDrm.EVENT_VENDOR_DEFINED:
+ assert(false);
+ break;
+ default:
+                    Log.e(TAG, "Invalid DRM event " + event);
+ return;
+ }
+ }
+ }
+
+ private class PostRequestTask extends AsyncTask<String, Void, Void> {
+ private static final String TAG = "PostRequestTask";
+
+ private byte[] mDrmRequest;
+ private byte[] mResponseBody;
+
+ public PostRequestTask(byte[] drmRequest) {
+ mDrmRequest = drmRequest;
+ }
+
+ @Override
+ protected Void doInBackground(String... urls) {
+ mResponseBody = postRequest(urls[0], mDrmRequest);
+ if (mResponseBody != null) {
+ Log.d(TAG, "response length=" + mResponseBody.length);
+ }
+ return null;
+ }
+
+ private byte[] postRequest(String url, byte[] drmRequest) {
+ HttpClient httpClient = new DefaultHttpClient();
+ HttpPost httpPost = new HttpPost(url + "&signedRequest=" + new String(drmRequest));
+
+ Log.d(TAG, "PostRequest:" + httpPost.getRequestLine());
+ try {
+ // Add data
+ httpPost.setHeader("Accept", "*/*");
+ httpPost.setHeader("User-Agent", "Widevine CDM v1.0");
+ httpPost.setHeader("Content-Type", "application/json");
+
+ // Execute HTTP Post Request
+ HttpResponse response = httpClient.execute(httpPost);
+
+ byte[] responseBody;
+ int responseCode = response.getStatusLine().getStatusCode();
+ if (responseCode == 200) {
+ responseBody = EntityUtils.toByteArray(response.getEntity());
+ } else {
+ Log.d(TAG, "Server returned HTTP error code " + responseCode);
+ return null;
+ }
+ return responseBody;
+ } catch (ClientProtocolException e) {
+ e.printStackTrace();
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ return null;
+ }
+
+ @Override
+ protected void onPostExecute(Void v) {
+ onProvisionResponse(mResponseBody);
+ }
+ }
+
+ private native void nativeOnKeyMessage(int nativeMediaDrmBridge, String sessionId,
+ byte[] message, String destinationUrl);
+
+ private native void nativeOnKeyAdded(int nativeMediaDrmBridge, String sessionId);
+
+ private native void nativeOnKeyError(int nativeMediaDrmBridge, String sessionId);
+}
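
getUUIDFromBytes() assembles the two UUID halves big-endian by hand;
java.nio.ByteBuffer, which reads big-endian by default, gives an equivalent
and more compact formulation. A sketch using the well-known Widevine scheme
UUID as sample input:

    import java.nio.ByteBuffer;
    import java.util.UUID;

    // Equivalent formulation of getUUIDFromBytes() using ByteBuffer.
    class UuidFromBytes {
        static UUID fromBytes(byte[] data) {
            if (data.length != 16) {
                return null;
            }
            ByteBuffer buffer = ByteBuffer.wrap(data); // big-endian by default
            return new UUID(buffer.getLong(), buffer.getLong());
        }

        public static void main(String[] args) {
            byte[] widevine = {
                (byte) 0xed, (byte) 0xef, (byte) 0x8b, (byte) 0xa9,
                (byte) 0x79, (byte) 0xd6, (byte) 0x4a, (byte) 0xce,
                (byte) 0xa3, (byte) 0xc8, (byte) 0x27, (byte) 0xdc,
                (byte) 0xd5, (byte) 0x1d, (byte) 0x21, (byte) 0xed };
            // Prints edef8ba9-79d6-4ace-a3c8-27dcd51d21ed
            System.out.println(fromBytes(widevine));
        }
    }
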
diff --git a/chromium/media/base/android/java/src/org/chromium/media/MediaPlayerBridge.java b/chromium/media/base/android/java/src/org/chromium/media/MediaPlayerBridge.java
new file mode 100644
index 00000000000..4b0a1aa6d1a
--- /dev/null
+++ b/chromium/media/base/android/java/src/org/chromium/media/MediaPlayerBridge.java
@@ -0,0 +1,214 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.media;
+
+import android.content.Context;
+import android.media.MediaPlayer;
+import android.net.Uri;
+import android.text.TextUtils;
+import android.util.Log;
+import android.view.Surface;
+
+import org.chromium.base.CalledByNative;
+import org.chromium.base.JNINamespace;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.util.HashMap;
+import java.util.Map;
+
+// A wrapper around android.media.MediaPlayer that allows the native code to use it.
+// See media/base/android/media_player_bridge.cc for the corresponding native code.
+@JNINamespace("media")
+public class MediaPlayerBridge {
+
+ private static final String TAG = "MediaPlayerBridge";
+
+ // Local player to forward this to. We don't initialize it here since the subclass might not
+ // want it.
+ private MediaPlayer mPlayer;
+
+ @CalledByNative
+ private static MediaPlayerBridge create() {
+ return new MediaPlayerBridge();
+ }
+
+ protected MediaPlayer getLocalPlayer() {
+ if (mPlayer == null) {
+ mPlayer = new MediaPlayer();
+ }
+ return mPlayer;
+ }
+
+ @CalledByNative
+ protected void setSurface(Surface surface) {
+ getLocalPlayer().setSurface(surface);
+ }
+
+ @CalledByNative
+ protected void prepareAsync() throws IllegalStateException {
+ getLocalPlayer().prepareAsync();
+ }
+
+ @CalledByNative
+ protected boolean isPlaying() {
+ return getLocalPlayer().isPlaying();
+ }
+
+ @CalledByNative
+ protected int getVideoWidth() {
+ return getLocalPlayer().getVideoWidth();
+ }
+
+ @CalledByNative
+ protected int getVideoHeight() {
+ return getLocalPlayer().getVideoHeight();
+ }
+
+ @CalledByNative
+ protected int getCurrentPosition() {
+ return getLocalPlayer().getCurrentPosition();
+ }
+
+ @CalledByNative
+ protected int getDuration() {
+ return getLocalPlayer().getDuration();
+ }
+
+ @CalledByNative
+ protected void release() {
+ getLocalPlayer().release();
+ }
+
+ @CalledByNative
+ protected void setVolume(double volume) {
+ getLocalPlayer().setVolume((float) volume, (float) volume);
+ }
+
+ @CalledByNative
+ protected void start() {
+ getLocalPlayer().start();
+ }
+
+ @CalledByNative
+ protected void pause() {
+ getLocalPlayer().pause();
+ }
+
+ @CalledByNative
+ protected void seekTo(int msec) throws IllegalStateException {
+ getLocalPlayer().seekTo(msec);
+ }
+
+ @CalledByNative
+ protected boolean setDataSource(
+ Context context, String url, String cookies, boolean hideUrlLog) {
+ Uri uri = Uri.parse(url);
+ HashMap<String, String> headersMap = new HashMap<String, String>();
+ if (hideUrlLog)
+ headersMap.put("x-hide-urls-from-log", "true");
+ if (!TextUtils.isEmpty(cookies))
+ headersMap.put("Cookie", cookies);
+ try {
+ getLocalPlayer().setDataSource(context, uri, headersMap);
+ return true;
+ } catch (Exception e) {
+ return false;
+ }
+ }
+
+ protected void setOnBufferingUpdateListener(MediaPlayer.OnBufferingUpdateListener listener) {
+ getLocalPlayer().setOnBufferingUpdateListener(listener);
+ }
+
+ protected void setOnCompletionListener(MediaPlayer.OnCompletionListener listener) {
+ getLocalPlayer().setOnCompletionListener(listener);
+ }
+
+ protected void setOnErrorListener(MediaPlayer.OnErrorListener listener) {
+ getLocalPlayer().setOnErrorListener(listener);
+ }
+
+ protected void setOnPreparedListener(MediaPlayer.OnPreparedListener listener) {
+ getLocalPlayer().setOnPreparedListener(listener);
+ }
+
+ protected void setOnSeekCompleteListener(MediaPlayer.OnSeekCompleteListener listener) {
+ getLocalPlayer().setOnSeekCompleteListener(listener);
+ }
+
+ protected void setOnVideoSizeChangedListener(MediaPlayer.OnVideoSizeChangedListener listener) {
+ getLocalPlayer().setOnVideoSizeChangedListener(listener);
+ }
+
+ private static class AllowedOperations {
+ private final boolean mCanPause;
+ private final boolean mCanSeekForward;
+ private final boolean mCanSeekBackward;
+
+ private AllowedOperations(boolean canPause, boolean canSeekForward,
+ boolean canSeekBackward) {
+ mCanPause = canPause;
+ mCanSeekForward = canSeekForward;
+ mCanSeekBackward = canSeekBackward;
+ }
+
+ @CalledByNative("AllowedOperations")
+ private boolean canPause() { return mCanPause; }
+
+ @CalledByNative("AllowedOperations")
+ private boolean canSeekForward() { return mCanSeekForward; }
+
+ @CalledByNative("AllowedOperations")
+ private boolean canSeekBackward() { return mCanSeekBackward; }
+ }
+
+ /**
+ * Returns an AllowedOperations object to show all the operations that are
+ * allowed on the media player.
+ */
+ @CalledByNative
+ private static AllowedOperations getAllowedOperations(MediaPlayer player) {
+ boolean canPause = true;
+ boolean canSeekForward = true;
+ boolean canSeekBackward = true;
+ try {
+ Method getMetadata = player.getClass().getDeclaredMethod(
+ "getMetadata", boolean.class, boolean.class);
+ getMetadata.setAccessible(true);
+ Object data = getMetadata.invoke(player, false, false);
+ if (data != null) {
+ Class<?> metadataClass = data.getClass();
+ Method hasMethod = metadataClass.getDeclaredMethod("has", int.class);
+ Method getBooleanMethod = metadataClass.getDeclaredMethod("getBoolean", int.class);
+
+ int pause = (Integer) metadataClass.getField("PAUSE_AVAILABLE").get(null);
+ int seekForward =
+ (Integer) metadataClass.getField("SEEK_FORWARD_AVAILABLE").get(null);
+ int seekBackward =
+ (Integer) metadataClass.getField("SEEK_BACKWARD_AVAILABLE").get(null);
+ hasMethod.setAccessible(true);
+ getBooleanMethod.setAccessible(true);
+ canPause = !((Boolean) hasMethod.invoke(data, pause))
+ || ((Boolean) getBooleanMethod.invoke(data, pause));
+ canSeekForward = !((Boolean) hasMethod.invoke(data, seekForward))
+ || ((Boolean) getBooleanMethod.invoke(data, seekForward));
+ canSeekBackward = !((Boolean) hasMethod.invoke(data, seekBackward))
+ || ((Boolean) getBooleanMethod.invoke(data, seekBackward));
+ }
+ } catch (NoSuchMethodException e) {
+ Log.e(TAG, "Cannot find getMetadata() method: " + e);
+ } catch (InvocationTargetException e) {
+ Log.e(TAG, "Cannot invoke MediaPlayer.getMetadata() method: " + e);
+ } catch (IllegalAccessException e) {
+ Log.e(TAG, "Cannot access metadata: " + e);
+ } catch (NoSuchFieldException e) {
+ Log.e(TAG, "Cannot find matching fields in Metadata class: " + e);
+ }
+ return new AllowedOperations(canPause, canSeekForward, canSeekBackward);
+ }
+}
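
The reflection code in getAllowedOperations() reduces to a permissive-by-default
predicate: an operation is allowed unless the hidden Metadata object both
contains the key and reports false for it. A runnable sketch with a plain map
standing in for the hidden android.media.Metadata class (the key value below
is a stand-in, not the real PAUSE_AVAILABLE constant):

    import java.util.HashMap;
    import java.util.Map;

    // Sketch of the predicate behind getAllowedOperations().
    class AllowedOpsExample {
        static boolean allowed(Map<Integer, Boolean> metadata, int key) {
            // Permissive by default: only an explicit "false" entry denies.
            return !metadata.containsKey(key) || metadata.get(key);
        }

        public static void main(String[] args) {
            final int PAUSE_AVAILABLE = 1; // hypothetical key value
            Map<Integer, Boolean> metadata = new HashMap<Integer, Boolean>();
            System.out.println(allowed(metadata, PAUSE_AVAILABLE)); // true: key absent
            metadata.put(PAUSE_AVAILABLE, false);
            System.out.println(allowed(metadata, PAUSE_AVAILABLE)); // false: denied
        }
    }
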
diff --git a/chromium/media/base/android/java/src/org/chromium/media/MediaPlayerListener.java b/chromium/media/base/android/java/src/org/chromium/media/MediaPlayerListener.java
new file mode 100644
index 00000000000..3c68589844e
--- /dev/null
+++ b/chromium/media/base/android/java/src/org/chromium/media/MediaPlayerListener.java
@@ -0,0 +1,169 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.media;
+
+import android.Manifest.permission;
+import android.content.Context;
+import android.content.pm.PackageManager;
+import android.media.AudioManager;
+import android.media.MediaPlayer;
+
+import org.chromium.base.CalledByNative;
+import org.chromium.base.JNINamespace;
+
+// This class implements all the listener interfaces for the Android MediaPlayer.
+// Callbacks will be sent to the native class for processing.
+@JNINamespace("media")
+class MediaPlayerListener implements MediaPlayer.OnPreparedListener,
+ MediaPlayer.OnCompletionListener,
+ MediaPlayer.OnBufferingUpdateListener,
+ MediaPlayer.OnSeekCompleteListener,
+ MediaPlayer.OnVideoSizeChangedListener,
+ MediaPlayer.OnErrorListener,
+ AudioManager.OnAudioFocusChangeListener {
+ // These values are mirrored as enums in media/base/android/media_player_bridge.h.
+ // Please ensure they stay in sync.
+ private static final int MEDIA_ERROR_FORMAT = 0;
+ private static final int MEDIA_ERROR_DECODE = 1;
+ private static final int MEDIA_ERROR_NOT_VALID_FOR_PROGRESSIVE_PLAYBACK = 2;
+ private static final int MEDIA_ERROR_INVALID_CODE = 3;
+
+ // These values are copied from android media player.
+ public static final int MEDIA_ERROR_MALFORMED = -1007;
+ public static final int MEDIA_ERROR_TIMED_OUT = -110;
+
+ // Used to determine the class instance to dispatch the native call to.
+ private int mNativeMediaPlayerListener = 0;
+ private final Context mContext;
+
+ private MediaPlayerListener(int nativeMediaPlayerListener, Context context) {
+ mNativeMediaPlayerListener = nativeMediaPlayerListener;
+ mContext = context;
+ }
+
+ @Override
+ public boolean onError(MediaPlayer mp, int what, int extra) {
+ int errorType;
+ switch (what) {
+ case MediaPlayer.MEDIA_ERROR_UNKNOWN:
+ switch (extra) {
+ case MEDIA_ERROR_MALFORMED:
+ errorType = MEDIA_ERROR_DECODE;
+ break;
+ case MEDIA_ERROR_TIMED_OUT:
+ errorType = MEDIA_ERROR_INVALID_CODE;
+ break;
+ default:
+ errorType = MEDIA_ERROR_FORMAT;
+ break;
+ }
+ break;
+ case MediaPlayer.MEDIA_ERROR_SERVER_DIED:
+ errorType = MEDIA_ERROR_DECODE;
+ break;
+ case MediaPlayer.MEDIA_ERROR_NOT_VALID_FOR_PROGRESSIVE_PLAYBACK:
+ errorType = MEDIA_ERROR_NOT_VALID_FOR_PROGRESSIVE_PLAYBACK;
+ break;
+ default:
+                // There are some undocumented error codes for the Android MediaPlayer.
+                // For example, when the SurfaceTexture is deleted before setVideoSurface()
+                // is called with NULL, MediaPlayer reports error -38. These errors should
+                // be ignored and not treated as an error by WebKit.
+ errorType = MEDIA_ERROR_INVALID_CODE;
+ break;
+ }
+ nativeOnMediaError(mNativeMediaPlayerListener, errorType);
+ return true;
+ }
+
+ @Override
+ public void onVideoSizeChanged(MediaPlayer mp, int width, int height) {
+ nativeOnVideoSizeChanged(mNativeMediaPlayerListener, width, height);
+ }
+
+ @Override
+ public void onSeekComplete(MediaPlayer mp) {
+ nativeOnSeekComplete(mNativeMediaPlayerListener);
+ }
+
+ @Override
+ public void onBufferingUpdate(MediaPlayer mp, int percent) {
+ nativeOnBufferingUpdate(mNativeMediaPlayerListener, percent);
+ }
+
+ @Override
+ public void onCompletion(MediaPlayer mp) {
+ nativeOnPlaybackComplete(mNativeMediaPlayerListener);
+ }
+
+ @Override
+ public void onPrepared(MediaPlayer mp) {
+ nativeOnMediaPrepared(mNativeMediaPlayerListener);
+ }
+
+ @Override
+ public void onAudioFocusChange(int focusChange) {
+ if (focusChange == AudioManager.AUDIOFOCUS_LOSS ||
+ focusChange == AudioManager.AUDIOFOCUS_LOSS_TRANSIENT) {
+ nativeOnMediaInterrupted(mNativeMediaPlayerListener);
+ }
+ }
+
+ @CalledByNative
+ public void releaseResources() {
+ if (mContext != null) {
+ // Unregister the wish for audio focus.
+ AudioManager am = (AudioManager) mContext.getSystemService(Context.AUDIO_SERVICE);
+ if (am != null) {
+ am.abandonAudioFocus(this);
+ }
+ }
+ }
+
+ @CalledByNative
+ private static MediaPlayerListener create(int nativeMediaPlayerListener,
+ Context context, MediaPlayerBridge mediaPlayerBridge) {
+ final MediaPlayerListener listener =
+ new MediaPlayerListener(nativeMediaPlayerListener, context);
+ mediaPlayerBridge.setOnBufferingUpdateListener(listener);
+ mediaPlayerBridge.setOnCompletionListener(listener);
+ mediaPlayerBridge.setOnErrorListener(listener);
+ mediaPlayerBridge.setOnPreparedListener(listener);
+ mediaPlayerBridge.setOnSeekCompleteListener(listener);
+ mediaPlayerBridge.setOnVideoSizeChangedListener(listener);
+
+ AudioManager am = (AudioManager) context.getSystemService(Context.AUDIO_SERVICE);
+ am.requestAudioFocus(
+ listener,
+ AudioManager.STREAM_MUSIC,
+
+                // Request transient focus that allows other apps to keep
+                // playing at reduced volume ("ducking").
+                AudioManager.AUDIOFOCUS_GAIN_TRANSIENT_MAY_DUCK);
+ return listener;
+ }
+
+ /**
+ * See media/base/android/media_player_listener.cc for all the following functions.
+ */
+ private native void nativeOnMediaError(
+ int nativeMediaPlayerListener,
+ int errorType);
+
+ private native void nativeOnVideoSizeChanged(
+ int nativeMediaPlayerListener,
+ int width, int height);
+
+ private native void nativeOnBufferingUpdate(
+ int nativeMediaPlayerListener,
+ int percent);
+
+ private native void nativeOnMediaPrepared(int nativeMediaPlayerListener);
+
+ private native void nativeOnPlaybackComplete(int nativeMediaPlayerListener);
+
+ private native void nativeOnSeekComplete(int nativeMediaPlayerListener);
+
+ private native void nativeOnMediaInterrupted(int nativeMediaPlayerListener);
+}
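
The onError() switch above is a pure mapping from MediaPlayer's (what, extra)
pair onto the four Chromium-side error codes. A standalone restatement, with
the android.media.MediaPlayer constants inlined with their documented values
so the sketch runs on a plain JVM:

    // Restatement of the MediaPlayerListener.onError() mapping.
    class MediaErrorMapExample {
        static final int MEDIA_ERROR_UNKNOWN = 1;       // MediaPlayer.MEDIA_ERROR_UNKNOWN
        static final int MEDIA_ERROR_SERVER_DIED = 100; // MediaPlayer.MEDIA_ERROR_SERVER_DIED
        static final int MEDIA_ERROR_NOT_VALID_FOR_PROGRESSIVE_PLAYBACK = 200;
        static final int MEDIA_ERROR_MALFORMED = -1007;
        static final int MEDIA_ERROR_TIMED_OUT = -110;

        // Chromium-side values, mirroring media_player_bridge.h.
        static final int ERROR_FORMAT = 0;
        static final int ERROR_DECODE = 1;
        static final int ERROR_NOT_VALID_FOR_PROGRESSIVE_PLAYBACK = 2;
        static final int ERROR_INVALID_CODE = 3;

        static int map(int what, int extra) {
            switch (what) {
                case MEDIA_ERROR_UNKNOWN:
                    if (extra == MEDIA_ERROR_MALFORMED) return ERROR_DECODE;
                    if (extra == MEDIA_ERROR_TIMED_OUT) return ERROR_INVALID_CODE;
                    return ERROR_FORMAT;
                case MEDIA_ERROR_SERVER_DIED:
                    return ERROR_DECODE;
                case MEDIA_ERROR_NOT_VALID_FOR_PROGRESSIVE_PLAYBACK:
                    return ERROR_NOT_VALID_FOR_PROGRESSIVE_PLAYBACK;
                default:
                    // Undocumented codes such as -38 are deliberately ignored.
                    return ERROR_INVALID_CODE;
            }
        }

        public static void main(String[] args) {
            System.out.println(map(MEDIA_ERROR_UNKNOWN, MEDIA_ERROR_MALFORMED)); // 1
            System.out.println(map(-38, 0));                                     // 3
        }
    }
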
diff --git a/chromium/media/base/android/java/src/org/chromium/media/VideoCapture.java b/chromium/media/base/android/java/src/org/chromium/media/VideoCapture.java
new file mode 100644
index 00000000000..f055f35ed68
--- /dev/null
+++ b/chromium/media/base/android/java/src/org/chromium/media/VideoCapture.java
@@ -0,0 +1,429 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.media;
+
+import android.content.Context;
+import android.graphics.ImageFormat;
+import android.graphics.SurfaceTexture;
+import android.graphics.SurfaceTexture.OnFrameAvailableListener;
+import android.hardware.Camera;
+import android.hardware.Camera.PreviewCallback;
+import android.opengl.GLES20;
+import android.util.Log;
+import android.view.Surface;
+import android.view.WindowManager;
+
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.List;
+import java.util.concurrent.locks.ReentrantLock;
+
+import org.chromium.base.CalledByNative;
+import org.chromium.base.JNINamespace;
+
+@JNINamespace("media")
+public class VideoCapture implements PreviewCallback, OnFrameAvailableListener {
+ static class CaptureCapability {
+ public int mWidth = 0;
+ public int mHeight = 0;
+ public int mDesiredFps = 0;
+ }
+
+    // Devices running an OS older than JELLY_BEAN, and some devices even on
+    // JELLY_BEAN or newer, don't support the YV12 format correctly. To work
+    // around the issue we request NV21 on those devices. This is a temporary
+    // hack until device manufacturers fix the problem or we no longer need
+    // to support those devices.
+ private static class DeviceImageFormatHack {
+ private static final String[] sBUGGY_DEVICE_LIST = {
+ "SAMSUNG-SGH-I747",
+ };
+
+ static int getImageFormat() {
+ if (android.os.Build.VERSION.SDK_INT < android.os.Build.VERSION_CODES.JELLY_BEAN) {
+ return ImageFormat.NV21;
+ }
+
+ for (String buggyDevice : sBUGGY_DEVICE_LIST) {
+ if (buggyDevice.contentEquals(android.os.Build.MODEL)) {
+ return ImageFormat.NV21;
+ }
+ }
+
+ return ImageFormat.YV12;
+ }
+ }
+
+ private Camera mCamera;
+    private final ReentrantLock mPreviewBufferLock = new ReentrantLock();
+ private int mImageFormat = ImageFormat.YV12;
+ private byte[] mColorPlane = null;
+ private Context mContext = null;
+ // True when native code has started capture.
+ private boolean mIsRunning = false;
+
+ private static final int NUM_CAPTURE_BUFFERS = 3;
+ private int mExpectedFrameSize = 0;
+ private int mId = 0;
+ // Native callback context variable.
+ private int mNativeVideoCaptureDeviceAndroid = 0;
+ private int[] mGlTextures = null;
+ private SurfaceTexture mSurfaceTexture = null;
+ private static final int GL_TEXTURE_EXTERNAL_OES = 0x8D65;
+
+ private int mCameraOrientation = 0;
+ private int mCameraFacing = 0;
+ private int mDeviceOrientation = 0;
+
+ CaptureCapability mCurrentCapability = null;
+ private static final String TAG = "VideoCapture";
+
+ @CalledByNative
+ public static VideoCapture createVideoCapture(
+ Context context, int id, int nativeVideoCaptureDeviceAndroid) {
+ return new VideoCapture(context, id, nativeVideoCaptureDeviceAndroid);
+ }
+
+ public VideoCapture(
+ Context context, int id, int nativeVideoCaptureDeviceAndroid) {
+ mContext = context;
+ mId = id;
+ mNativeVideoCaptureDeviceAndroid = nativeVideoCaptureDeviceAndroid;
+ }
+
+ // Returns true on success, false otherwise.
+ @CalledByNative
+ public boolean allocate(int width, int height, int frameRate) {
+ Log.d(TAG, "allocate: requested width=" + width +
+ ", height=" + height + ", frameRate=" + frameRate);
+ try {
+ mCamera = Camera.open(mId);
+            Camera.CameraInfo cameraInfo = new Camera.CameraInfo();
+            Camera.getCameraInfo(mId, cameraInfo);
+            mCameraOrientation = cameraInfo.orientation;
+            mCameraFacing = cameraInfo.facing;
+ mDeviceOrientation = getDeviceOrientation();
+ Log.d(TAG, "allocate: device orientation=" + mDeviceOrientation +
+ ", camera orientation=" + mCameraOrientation +
+ ", facing=" + mCameraFacing);
+
+ Camera.Parameters parameters = mCamera.getParameters();
+
+ // Calculate fps.
+ List<int[]> listFpsRange = parameters.getSupportedPreviewFpsRange();
+ if (listFpsRange.size() == 0) {
+ Log.e(TAG, "allocate: no fps range found");
+ return false;
+ }
+ int frameRateInMs = frameRate * 1000;
+            Iterator<int[]> itFpsRange = listFpsRange.iterator();
+            int[] fpsRange = itFpsRange.next();
+ // Use the first range as default.
+ int fpsMin = fpsRange[0];
+ int fpsMax = fpsRange[1];
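+            // The supported ranges are in units of fps * 1000; round the
+            // range's minimum up to whole fps for the default rate.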
+ int newFrameRate = (fpsMin + 999) / 1000;
+ while (itFpsRange.hasNext()) {
+                fpsRange = itFpsRange.next();
+ if (fpsRange[0] <= frameRateInMs &&
+ frameRateInMs <= fpsRange[1]) {
+ fpsMin = fpsRange[0];
+ fpsMax = fpsRange[1];
+ newFrameRate = frameRate;
+ break;
+ }
+ }
+ frameRate = newFrameRate;
+ Log.d(TAG, "allocate: fps set to " + frameRate);
+
+ mCurrentCapability = new CaptureCapability();
+ mCurrentCapability.mDesiredFps = frameRate;
+
+ // Calculate size.
+ List<Camera.Size> listCameraSize =
+ parameters.getSupportedPreviewSizes();
+ int minDiff = Integer.MAX_VALUE;
+ int matchedWidth = width;
+ int matchedHeight = height;
+            Iterator<Camera.Size> itCameraSize = listCameraSize.iterator();
+            while (itCameraSize.hasNext()) {
+                Camera.Size size = itCameraSize.next();
+ int diff = Math.abs(size.width - width) +
+ Math.abs(size.height - height);
+ Log.d(TAG, "allocate: support resolution (" +
+ size.width + ", " + size.height + "), diff=" + diff);
+ // TODO(wjia): Remove this hack (forcing width to be multiple
+ // of 32) by supporting stride in video frame buffer.
+ // Right now, VideoCaptureController requires compact YV12
+ // (i.e., with no padding).
+ if (diff < minDiff && (size.width % 32 == 0)) {
+ minDiff = diff;
+ matchedWidth = size.width;
+ matchedHeight = size.height;
+ }
+ }
+ if (minDiff == Integer.MAX_VALUE) {
+                Log.e(TAG, "allocate: cannot find a resolution whose width " +
+                        "is a multiple of 32");
+ return false;
+ }
+ mCurrentCapability.mWidth = matchedWidth;
+ mCurrentCapability.mHeight = matchedHeight;
+ Log.d(TAG, "allocate: matched width=" + matchedWidth +
+ ", height=" + matchedHeight);
+
+ calculateImageFormat(matchedWidth, matchedHeight);
+
+ parameters.setPreviewSize(matchedWidth, matchedHeight);
+ parameters.setPreviewFormat(mImageFormat);
+ parameters.setPreviewFpsRange(fpsMin, fpsMax);
+ mCamera.setParameters(parameters);
+
+            // Set up a dummy SurfaceTexture as the preview sink; frames are
+            // delivered through the preview callback buffers, so no
+            // frame-available listener is installed on it.
+ mGlTextures = new int[1];
+ // Generate one texture pointer and bind it as an external texture.
+ GLES20.glGenTextures(1, mGlTextures, 0);
+ GLES20.glBindTexture(GL_TEXTURE_EXTERNAL_OES, mGlTextures[0]);
+ // No mip-mapping with camera source.
+ GLES20.glTexParameterf(GL_TEXTURE_EXTERNAL_OES,
+ GLES20.GL_TEXTURE_MIN_FILTER, GLES20.GL_LINEAR);
+ GLES20.glTexParameterf(GL_TEXTURE_EXTERNAL_OES,
+ GLES20.GL_TEXTURE_MAG_FILTER, GLES20.GL_LINEAR);
+            // Clamp to edge is the only supported wrap mode.
+ GLES20.glTexParameteri(GL_TEXTURE_EXTERNAL_OES,
+ GLES20.GL_TEXTURE_WRAP_S, GLES20.GL_CLAMP_TO_EDGE);
+ GLES20.glTexParameteri(GL_TEXTURE_EXTERNAL_OES,
+ GLES20.GL_TEXTURE_WRAP_T, GLES20.GL_CLAMP_TO_EDGE);
+
+ mSurfaceTexture = new SurfaceTexture(mGlTextures[0]);
+ mSurfaceTexture.setOnFrameAvailableListener(null);
+
+ mCamera.setPreviewTexture(mSurfaceTexture);
+
+ int bufSize = matchedWidth * matchedHeight *
+ ImageFormat.getBitsPerPixel(mImageFormat) / 8;
+ for (int i = 0; i < NUM_CAPTURE_BUFFERS; i++) {
+ byte[] buffer = new byte[bufSize];
+ mCamera.addCallbackBuffer(buffer);
+ }
+ mExpectedFrameSize = bufSize;
+ } catch (IOException ex) {
+ Log.e(TAG, "allocate: " + ex);
+ return false;
+ }
+
+ return true;
+ }
+
+ @CalledByNative
+ public int queryWidth() {
+ return mCurrentCapability.mWidth;
+ }
+
+ @CalledByNative
+ public int queryHeight() {
+ return mCurrentCapability.mHeight;
+ }
+
+ @CalledByNative
+ public int queryFrameRate() {
+ return mCurrentCapability.mDesiredFps;
+ }
+
+ @CalledByNative
+ public int startCapture() {
+ if (mCamera == null) {
+ Log.e(TAG, "startCapture: camera is null");
+ return -1;
+ }
+
+ mPreviewBufferLock.lock();
+ try {
+ if (mIsRunning) {
+ return 0;
+ }
+ mIsRunning = true;
+ } finally {
+ mPreviewBufferLock.unlock();
+ }
+ mCamera.setPreviewCallbackWithBuffer(this);
+ mCamera.startPreview();
+ return 0;
+ }
+
+ @CalledByNative
+ public int stopCapture() {
+ if (mCamera == null) {
+ Log.e(TAG, "stopCapture: camera is null");
+ return 0;
+ }
+
+ mPreviewBufferLock.lock();
+ try {
+ if (!mIsRunning) {
+ return 0;
+ }
+ mIsRunning = false;
+ } finally {
+ mPreviewBufferLock.unlock();
+ }
+
+ mCamera.stopPreview();
+ mCamera.setPreviewCallbackWithBuffer(null);
+ return 0;
+ }
+
+ @CalledByNative
+ public void deallocate() {
+ if (mCamera == null)
+ return;
+
+ stopCapture();
+ try {
+ mCamera.setPreviewTexture(null);
+ if (mGlTextures != null)
+ GLES20.glDeleteTextures(1, mGlTextures, 0);
+ mCurrentCapability = null;
+ mCamera.release();
+ mCamera = null;
+ } catch (IOException ex) {
+ Log.e(TAG, "deallocate: failed to deallocate camera, " + ex);
+ return;
+ }
+ }
+
+ @Override
+ public void onPreviewFrame(byte[] data, Camera camera) {
+ mPreviewBufferLock.lock();
+ try {
+ if (!mIsRunning) {
+ return;
+ }
+ if (data.length == mExpectedFrameSize) {
+ int rotation = getDeviceOrientation();
+ if (rotation != mDeviceOrientation) {
+ mDeviceOrientation = rotation;
+ Log.d(TAG,
+ "onPreviewFrame: device orientation=" +
+ mDeviceOrientation + ", camera orientation=" +
+ mCameraOrientation);
+ }
+ boolean flipVertical = false;
+ boolean flipHorizontal = false;
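+                // Front-facing frames are mirrored: compose the camera and
+                // device rotations, invert the result, and flip both axes
+                // when the frame lands at 90 or 270 degrees.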
+ if (mCameraFacing == Camera.CameraInfo.CAMERA_FACING_FRONT) {
+ rotation = (mCameraOrientation + rotation) % 360;
+ rotation = (360 - rotation) % 360;
+ flipHorizontal = (rotation == 270 || rotation == 90);
+ flipVertical = flipHorizontal;
+ } else {
+ rotation = (mCameraOrientation - rotation + 360) % 360;
+ }
+ if (mImageFormat == ImageFormat.NV21) {
+ convertNV21ToYV12(data);
+ }
+ nativeOnFrameAvailable(mNativeVideoCaptureDeviceAndroid,
+ data, mExpectedFrameSize,
+ rotation, flipVertical, flipHorizontal);
+ }
+ } finally {
+ mPreviewBufferLock.unlock();
+ if (camera != null) {
+ camera.addCallbackBuffer(data);
+ }
+ }
+ }
+
+ // TODO(wjia): investigate whether reading from texture could give better
+ // performance and frame rate.
+ @Override
+ public void onFrameAvailable(SurfaceTexture surfaceTexture) { }
+
+ private static class ChromiumCameraInfo {
+ private final int mId;
+ private final Camera.CameraInfo mCameraInfo;
+
+ private ChromiumCameraInfo(int index) {
+ mId = index;
+ mCameraInfo = new Camera.CameraInfo();
+ Camera.getCameraInfo(index, mCameraInfo);
+ }
+
+ @CalledByNative("ChromiumCameraInfo")
+ private static int getNumberOfCameras() {
+ return Camera.getNumberOfCameras();
+ }
+
+ @CalledByNative("ChromiumCameraInfo")
+ private static ChromiumCameraInfo getAt(int index) {
+ return new ChromiumCameraInfo(index);
+ }
+
+ @CalledByNative("ChromiumCameraInfo")
+ private int getId() {
+ return mId;
+ }
+
+ @CalledByNative("ChromiumCameraInfo")
+ private String getDeviceName() {
+ return "camera " + mId + ", facing " +
+ (mCameraInfo.facing ==
+ Camera.CameraInfo.CAMERA_FACING_FRONT ? "front" : "back");
+ }
+
+ @CalledByNative("ChromiumCameraInfo")
+ private int getOrientation() {
+ return mCameraInfo.orientation;
+ }
+ }
+
+ private native void nativeOnFrameAvailable(
+ int nativeVideoCaptureDeviceAndroid,
+ byte[] data,
+ int length,
+ int rotation,
+ boolean flipVertical,
+ boolean flipHorizontal);
+
+ private int getDeviceOrientation() {
+ int orientation = 0;
+ if (mContext != null) {
+ WindowManager wm = (WindowManager)mContext.getSystemService(
+ Context.WINDOW_SERVICE);
+            switch (wm.getDefaultDisplay().getRotation()) {
+ case Surface.ROTATION_90:
+ orientation = 90;
+ break;
+ case Surface.ROTATION_180:
+ orientation = 180;
+ break;
+ case Surface.ROTATION_270:
+ orientation = 270;
+ break;
+ case Surface.ROTATION_0:
+ default:
+ orientation = 0;
+ break;
+ }
+ }
+ return orientation;
+ }
+
+ private void calculateImageFormat(int width, int height) {
+ mImageFormat = DeviceImageFormatHack.getImageFormat();
+ if (mImageFormat == ImageFormat.NV21) {
+ mColorPlane = new byte[width * height / 4];
+ }
+ }
+
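+    // NV21 stores a full Y plane followed by interleaved VU samples, while
+    // YV12 expects the Y plane, then a full V plane, then a full U plane.
+    // The loop below de-interleaves in place: V samples are packed directly
+    // after the Y plane and U samples are staged in mColorPlane, which is
+    // then appended as the final plane.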
+ private void convertNV21ToYV12(byte[] data) {
+ final int ySize = mCurrentCapability.mWidth * mCurrentCapability.mHeight;
+ final int uvSize = ySize / 4;
+ for (int i = 0; i < uvSize; i++) {
+ final int index = ySize + i * 2;
+ data[ySize + i] = data[index];
+ mColorPlane[i] = data[index + 1];
+ }
+ System.arraycopy(mColorPlane, 0, data, ySize + uvSize, uvSize);
+ }
+}
diff --git a/chromium/media/base/android/java/src/org/chromium/media/WebAudioMediaCodecBridge.java b/chromium/media/base/android/java/src/org/chromium/media/WebAudioMediaCodecBridge.java
new file mode 100644
index 00000000000..1de7e42b8d2
--- /dev/null
+++ b/chromium/media/base/android/java/src/org/chromium/media/WebAudioMediaCodecBridge.java
@@ -0,0 +1,186 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.media;
+
+import android.content.Context;
+import android.media.AudioFormat;
+import android.media.MediaCodec;
+import android.media.MediaCodec.BufferInfo;
+import android.media.MediaExtractor;
+import android.media.MediaFormat;
+import android.os.ParcelFileDescriptor;
+import android.util.Log;
+
+import java.io.File;
+import java.nio.ByteBuffer;
+
+import org.chromium.base.CalledByNative;
+import org.chromium.base.JNINamespace;
+
+@JNINamespace("media")
+class WebAudioMediaCodecBridge {
+ private static final boolean DEBUG = true;
+ static final String LOG_TAG = "WebAudioMediaCodec";
+ // TODO(rtoy): What is the correct timeout value for reading
+ // from a file in memory?
+ static final long TIMEOUT_MICROSECONDS = 500;
+ @CalledByNative
+ private static String CreateTempFile(Context ctx) throws java.io.IOException {
+ File outputDirectory = ctx.getCacheDir();
+ File outputFile = File.createTempFile("webaudio", ".dat", outputDirectory);
+ return outputFile.getAbsolutePath();
+ }
+
+ @CalledByNative
+ private static boolean decodeAudioFile(Context ctx,
+ int nativeMediaCodecBridge,
+ int inputFD,
+ long dataSize) {
+
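+        // Reject sizes that do not fit in a signed 32-bit value.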
+ if (dataSize < 0 || dataSize > 0x7fffffff)
+ return false;
+
+ MediaExtractor extractor = new MediaExtractor();
+
+ ParcelFileDescriptor encodedFD;
+ encodedFD = ParcelFileDescriptor.adoptFd(inputFD);
+ try {
+ extractor.setDataSource(encodedFD.getFileDescriptor(), 0, dataSize);
+ } catch (Exception e) {
+ e.printStackTrace();
+ encodedFD.detachFd();
+ return false;
+ }
+
+ if (extractor.getTrackCount() <= 0) {
+ encodedFD.detachFd();
+ return false;
+ }
+
+ MediaFormat format = extractor.getTrackFormat(0);
+
+ // Number of channels specified in the file
+ int inputChannelCount = format.getInteger(MediaFormat.KEY_CHANNEL_COUNT);
+
+ // Number of channels the decoder will provide. (Not
+ // necessarily the same as inputChannelCount. See
+ // crbug.com/266006.)
+ int outputChannelCount = inputChannelCount;
+
+ int sampleRate = format.getInteger(MediaFormat.KEY_SAMPLE_RATE);
+ String mime = format.getString(MediaFormat.KEY_MIME);
+
+ long durationMicroseconds = 0;
+ if (format.containsKey(MediaFormat.KEY_DURATION)) {
+ try {
+ durationMicroseconds = format.getLong(MediaFormat.KEY_DURATION);
+ } catch (Exception e) {
+ Log.d(LOG_TAG, "Cannot get duration");
+ }
+ }
+
+ if (DEBUG) {
+ Log.d(LOG_TAG, "Tracks: " + extractor.getTrackCount()
+ + " Rate: " + sampleRate
+ + " Channels: " + inputChannelCount
+ + " Mime: " + mime
+ + " Duration: " + durationMicroseconds + " microsec");
+ }
+
+ nativeInitializeDestination(nativeMediaCodecBridge,
+ inputChannelCount,
+ sampleRate,
+ durationMicroseconds);
+
+ // Create decoder
+ MediaCodec codec = MediaCodec.createDecoderByType(mime);
+ codec.configure(format, null /* surface */, null /* crypto */, 0 /* flags */);
+ codec.start();
+
+ ByteBuffer[] codecInputBuffers = codec.getInputBuffers();
+ ByteBuffer[] codecOutputBuffers = codec.getOutputBuffers();
+
+ // A track must be selected and will be used to read samples.
+ extractor.selectTrack(0);
+
+ boolean sawInputEOS = false;
+ boolean sawOutputEOS = false;
+
+ // Keep processing until the output is done.
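+        // Each pass feeds one compressed sample from the extractor into an
+        // available input buffer and drains any decoded PCM chunk to the
+        // native side, until the end-of-stream flag reaches the output.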
+ while (!sawOutputEOS) {
+ if (!sawInputEOS) {
+ // Input side
+ int inputBufIndex = codec.dequeueInputBuffer(TIMEOUT_MICROSECONDS);
+
+ if (inputBufIndex >= 0) {
+ ByteBuffer dstBuf = codecInputBuffers[inputBufIndex];
+ int sampleSize = extractor.readSampleData(dstBuf, 0);
+ long presentationTimeMicroSec = 0;
+
+ if (sampleSize < 0) {
+ sawInputEOS = true;
+ sampleSize = 0;
+ } else {
+ presentationTimeMicroSec = extractor.getSampleTime();
+ }
+
+ codec.queueInputBuffer(inputBufIndex,
+ 0, /* offset */
+ sampleSize,
+ presentationTimeMicroSec,
+ sawInputEOS ? MediaCodec.BUFFER_FLAG_END_OF_STREAM : 0);
+
+ if (!sawInputEOS) {
+ extractor.advance();
+ }
+ }
+ }
+
+ // Output side
+ MediaCodec.BufferInfo info = new BufferInfo();
+ final int outputBufIndex = codec.dequeueOutputBuffer(info, TIMEOUT_MICROSECONDS);
+
+ if (outputBufIndex >= 0) {
+ ByteBuffer buf = codecOutputBuffers[outputBufIndex];
+
+ if (info.size > 0) {
+ nativeOnChunkDecoded(nativeMediaCodecBridge, buf, info.size,
+ inputChannelCount, outputChannelCount);
+ }
+
+ buf.clear();
+ codec.releaseOutputBuffer(outputBufIndex, false /* render */);
+
+ if ((info.flags & MediaCodec.BUFFER_FLAG_END_OF_STREAM) != 0) {
+ sawOutputEOS = true;
+ }
+ } else if (outputBufIndex == MediaCodec.INFO_OUTPUT_BUFFERS_CHANGED) {
+ codecOutputBuffers = codec.getOutputBuffers();
+ } else if (outputBufIndex == MediaCodec.INFO_OUTPUT_FORMAT_CHANGED) {
+ MediaFormat newFormat = codec.getOutputFormat();
+ outputChannelCount = newFormat.getInteger(MediaFormat.KEY_CHANNEL_COUNT);
+ Log.d(LOG_TAG, "output format changed to " + newFormat);
+ }
+ }
+
+ encodedFD.detachFd();
+
+ codec.stop();
+ codec.release();
+ codec = null;
+
+ return true;
+ }
+
+ private static native void nativeOnChunkDecoded(
+ int nativeWebAudioMediaCodecBridge, ByteBuffer buf, int size,
+ int inputChannelCount, int outputChannelCount);
+
+ private static native void nativeInitializeDestination(
+ int nativeWebAudioMediaCodecBridge,
+ int inputChannelCount,
+ int sampleRate,
+ long durationMicroseconds);
+}
diff --git a/chromium/media/base/android/media_codec_bridge.cc b/chromium/media/base/android/media_codec_bridge.cc
new file mode 100644
index 00000000000..ab549367803
--- /dev/null
+++ b/chromium/media/base/android/media_codec_bridge.cc
@@ -0,0 +1,420 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/android/media_codec_bridge.h"
+
+#include <jni.h>
+
+#include "base/android/build_info.h"
+#include "base/android/jni_android.h"
+#include "base/android/jni_array.h"
+#include "base/android/jni_string.h"
+#include "base/basictypes.h"
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/safe_numerics.h"
+#include "base/strings/stringprintf.h"
+#include "jni/MediaCodecBridge_jni.h"
+#include "media/base/bit_reader.h"
+#include "media/base/decrypt_config.h"
+
+using base::android::AttachCurrentThread;
+using base::android::ConvertUTF8ToJavaString;
+using base::android::ScopedJavaLocalRef;
+
+namespace media {
+
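+// Must be kept in sync with MediaCodec.BUFFER_FLAG_END_OF_STREAM in Java.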
+enum { kBufferFlagEndOfStream = 4 };
+
+static const char* AudioCodecToMimeType(const AudioCodec codec) {
+ switch (codec) {
+ case kCodecMP3:
+ return "audio/mpeg";
+ case kCodecVorbis:
+ return "audio/vorbis";
+ case kCodecAAC:
+ return "audio/mp4a-latm";
+ default:
+ return NULL;
+ }
+}
+
+static const char* VideoCodecToMimeType(const VideoCodec codec) {
+ switch (codec) {
+ case kCodecH264:
+ return "video/avc";
+ case kCodecVP8:
+ return "video/x-vnd.on2.vp8";
+ default:
+ return NULL;
+ }
+}
+
+static ScopedJavaLocalRef<jintArray> ToJavaIntArray(
+ JNIEnv* env, scoped_ptr<jint[]> native_array, int size) {
+ ScopedJavaLocalRef<jintArray> j_array(env, env->NewIntArray(size));
+ env->SetIntArrayRegion(j_array.obj(), 0, size, native_array.get());
+ return j_array;
+}
+
+// static
+const base::TimeDelta MediaCodecBridge::kTimeOutInfinity =
+ base::TimeDelta::FromMicroseconds(-1);
+
+// static
+const base::TimeDelta MediaCodecBridge::kTimeOutNoWait =
+ base::TimeDelta::FromMicroseconds(0);
+
+// static
+bool MediaCodecBridge::IsAvailable() {
+ // MediaCodec is only available on JB and greater.
+ return base::android::BuildInfo::GetInstance()->sdk_int() >= 16;
+}
+
+MediaCodecBridge::MediaCodecBridge(const char* mime) {
+ JNIEnv* env = AttachCurrentThread();
+ CHECK(env);
+ DCHECK(mime);
+
+ ScopedJavaLocalRef<jstring> j_type = ConvertUTF8ToJavaString(env, mime);
+ j_media_codec_.Reset(Java_MediaCodecBridge_create(
+ env, j_type.obj()));
+}
+
+MediaCodecBridge::~MediaCodecBridge() {
+ JNIEnv* env = AttachCurrentThread();
+ CHECK(env);
+ Java_MediaCodecBridge_release(env, j_media_codec_.obj());
+}
+
+void MediaCodecBridge::StartInternal() {
+ JNIEnv* env = AttachCurrentThread();
+ Java_MediaCodecBridge_start(env, j_media_codec_.obj());
+ GetOutputBuffers();
+}
+
+void MediaCodecBridge::Reset() {
+ JNIEnv* env = AttachCurrentThread();
+ Java_MediaCodecBridge_flush(env, j_media_codec_.obj());
+}
+
+void MediaCodecBridge::Stop() {
+ JNIEnv* env = AttachCurrentThread();
+ Java_MediaCodecBridge_stop(env, j_media_codec_.obj());
+}
+
+void MediaCodecBridge::GetOutputFormat(int* width, int* height) {
+ JNIEnv* env = AttachCurrentThread();
+
+ *width = Java_MediaCodecBridge_getOutputWidth(env, j_media_codec_.obj());
+ *height = Java_MediaCodecBridge_getOutputHeight(env, j_media_codec_.obj());
+}
+
+size_t MediaCodecBridge::QueueInputBuffer(
+ int index, const uint8* data, int size,
+ const base::TimeDelta& presentation_time) {
+ size_t size_to_copy = FillInputBuffer(index, data, size);
+ JNIEnv* env = AttachCurrentThread();
+ Java_MediaCodecBridge_queueInputBuffer(
+ env, j_media_codec_.obj(),
+ index, 0, size_to_copy, presentation_time.InMicroseconds(), 0);
+ return size_to_copy;
+}
+
+size_t MediaCodecBridge::QueueSecureInputBuffer(
+ int index, const uint8* data, int data_size, const uint8* key_id,
+ int key_id_size, const uint8* iv, int iv_size,
+ const SubsampleEntry* subsamples, int subsamples_size,
+ const base::TimeDelta& presentation_time) {
+ size_t size_to_copy = FillInputBuffer(index, data, data_size);
+
+ JNIEnv* env = AttachCurrentThread();
+ ScopedJavaLocalRef<jbyteArray> j_key_id =
+ base::android::ToJavaByteArray(env, key_id, key_id_size);
+ ScopedJavaLocalRef<jbyteArray> j_iv =
+ base::android::ToJavaByteArray(env, iv, iv_size);
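+  // CENC subsamples are passed to Java as two parallel arrays holding the
+  // clear and encrypted byte counts of each entry.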
+ scoped_ptr<jint[]> native_clear_array(new jint[subsamples_size]);
+ scoped_ptr<jint[]> native_cypher_array(new jint[subsamples_size]);
+ for (int i = 0; i < subsamples_size; ++i) {
+ native_clear_array[i] = subsamples[i].clear_bytes;
+ native_cypher_array[i] = subsamples[i].cypher_bytes;
+ }
+ ScopedJavaLocalRef<jintArray> clear_array = ToJavaIntArray(
+ env, native_clear_array.Pass(), subsamples_size);
+ ScopedJavaLocalRef<jintArray> cypher_array = ToJavaIntArray(
+ env, native_cypher_array.Pass(), subsamples_size);
+
+ Java_MediaCodecBridge_queueSecureInputBuffer(
+ env, j_media_codec_.obj(), index, 0, j_iv.obj(), j_key_id.obj(),
+ clear_array.obj(), cypher_array.obj(), subsamples_size,
+ presentation_time.InMicroseconds());
+
+ return size_to_copy;
+}
+
+void MediaCodecBridge::QueueEOS(int input_buffer_index) {
+ JNIEnv* env = AttachCurrentThread();
+ Java_MediaCodecBridge_queueInputBuffer(
+ env, j_media_codec_.obj(),
+ input_buffer_index, 0, 0, 0, kBufferFlagEndOfStream);
+}
+
+int MediaCodecBridge::DequeueInputBuffer(base::TimeDelta timeout) {
+ JNIEnv* env = AttachCurrentThread();
+ return Java_MediaCodecBridge_dequeueInputBuffer(
+ env, j_media_codec_.obj(), timeout.InMicroseconds());
+}
+
+int MediaCodecBridge::DequeueOutputBuffer(
+ base::TimeDelta timeout, size_t* offset, size_t* size,
+ base::TimeDelta* presentation_time, bool* end_of_stream) {
+ JNIEnv* env = AttachCurrentThread();
+
+ ScopedJavaLocalRef<jobject> result =
+ Java_MediaCodecBridge_dequeueOutputBuffer(env, j_media_codec_.obj(),
+ timeout.InMicroseconds());
+
+ int j_buffer = Java_DequeueOutputResult_index(env, result.obj());
+ if (j_buffer >= 0) {
+ int64 presentation_time_us =
+ Java_DequeueOutputResult_presentationTimeMicroseconds(
+ env, result.obj());
+ int flags = Java_DequeueOutputResult_flags(env, result.obj());
+ *offset = base::checked_numeric_cast<size_t>(
+ Java_DequeueOutputResult_offset(env, result.obj()));
+ *size = base::checked_numeric_cast<size_t>(
+ Java_DequeueOutputResult_numBytes(env, result.obj()));
+ *presentation_time =
+ base::TimeDelta::FromMicroseconds(presentation_time_us);
+ *end_of_stream = flags & kBufferFlagEndOfStream;
+ }
+ return j_buffer;
+}
+
+void MediaCodecBridge::ReleaseOutputBuffer(int index, bool render) {
+ JNIEnv* env = AttachCurrentThread();
+ CHECK(env);
+
+ Java_MediaCodecBridge_releaseOutputBuffer(
+ env, j_media_codec_.obj(), index, render);
+}
+
+void MediaCodecBridge::GetOutputBuffers() {
+ JNIEnv* env = AttachCurrentThread();
+ Java_MediaCodecBridge_getOutputBuffers(env, j_media_codec_.obj());
+}
+
+size_t MediaCodecBridge::FillInputBuffer(
+ int index, const uint8* data, int size) {
+ JNIEnv* env = AttachCurrentThread();
+
+ ScopedJavaLocalRef<jobject> j_buffer(
+ Java_MediaCodecBridge_getInputBuffer(env, j_media_codec_.obj(), index));
+
+ uint8* direct_buffer =
+ static_cast<uint8*>(env->GetDirectBufferAddress(j_buffer.obj()));
+ int64 buffer_capacity = env->GetDirectBufferCapacity(j_buffer.obj());
+
+ int size_to_copy = (buffer_capacity < size) ? buffer_capacity : size;
+  // TODO(qinmin): Handle the case where not all the data can be copied.
+ DCHECK(size_to_copy == size) <<
+ "Failed to fill all the data into the input buffer. Size to fill: "
+ << size << ". Size filled: " << size_to_copy;
+ if (size_to_copy > 0)
+ memcpy(direct_buffer, data, size_to_copy);
+ return size_to_copy;
+}
+
+AudioCodecBridge::AudioCodecBridge(const char* mime)
+ : MediaCodecBridge(mime) {
+}
+
+bool AudioCodecBridge::Start(
+ const AudioCodec codec, int sample_rate, int channel_count,
+ const uint8* extra_data, size_t extra_data_size, bool play_audio,
+ jobject media_crypto) {
+ JNIEnv* env = AttachCurrentThread();
+ DCHECK(AudioCodecToMimeType(codec));
+
+ ScopedJavaLocalRef<jstring> j_mime =
+ ConvertUTF8ToJavaString(env, AudioCodecToMimeType(codec));
+ ScopedJavaLocalRef<jobject> j_format(
+ Java_MediaCodecBridge_createAudioFormat(
+ env, j_mime.obj(), sample_rate, channel_count));
+ DCHECK(!j_format.is_null());
+
+ if (!ConfigureMediaFormat(j_format.obj(), codec, extra_data, extra_data_size))
+ return false;
+
+ if (!Java_MediaCodecBridge_configureAudio(
+ env, media_codec(), j_format.obj(), media_crypto, 0, play_audio)) {
+ return false;
+ }
+ StartInternal();
+ return true;
+}
+
+bool AudioCodecBridge::ConfigureMediaFormat(
+ jobject j_format, const AudioCodec codec, const uint8* extra_data,
+ size_t extra_data_size) {
+ if (extra_data_size == 0)
+ return true;
+
+ JNIEnv* env = AttachCurrentThread();
+  switch (codec) {
+ case kCodecVorbis:
+ {
+ if (extra_data[0] != 2) {
+ LOG(ERROR) << "Invalid number of vorbis headers before the codec "
+ << "header: " << extra_data[0];
+ return false;
+ }
+
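+      // |extra_data| uses Xiph lacing: byte 0 is the packet count minus one
+      // (2 for Vorbis's three headers), followed by the sizes of the first
+      // two headers, each encoded as a run of 0xFF bytes ended by a byte
+      // less than 0xFF. The codec header occupies whatever bytes remain.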
+ size_t header_length[2];
+ // |total_length| keeps track of the total number of bytes before the last
+ // header.
+ size_t total_length = 1;
+ const uint8* current_pos = extra_data;
+ // Calculate the length of the first 2 headers.
+ for (int i = 0; i < 2; ++i) {
+ header_length[i] = 0;
+ while (total_length < extra_data_size) {
+ size_t size = *(++current_pos);
+ total_length += 1 + size;
+ if (total_length > 0x80000000) {
+ LOG(ERROR) << "Vorbis header size too large";
+ return false;
+ }
+ header_length[i] += size;
+ if (size < 0xFF)
+ break;
+ }
+ if (total_length >= extra_data_size) {
+ LOG(ERROR) << "Invalid vorbis header size in the extra data";
+ return false;
+ }
+ }
+ current_pos++;
+      // The first header is the identification header.
+ jobject identification_header = env->NewDirectByteBuffer(
+ const_cast<uint8*>(current_pos), header_length[0]);
+ Java_MediaCodecBridge_setCodecSpecificData(
+ env, j_format, 0, identification_header);
+      // The last header is the codec header.
+ jobject codec_header = env->NewDirectByteBuffer(
+ const_cast<uint8*>(extra_data + total_length),
+ extra_data_size - total_length);
+ Java_MediaCodecBridge_setCodecSpecificData(
+ env, j_format, 1, codec_header);
+ env->DeleteLocalRef(codec_header);
+ env->DeleteLocalRef(identification_header);
+ break;
+ }
+ case kCodecAAC:
+ {
+ media::BitReader reader(extra_data, extra_data_size);
+
+ // The following code is copied from aac.cc
+ // TODO(qinmin): refactor the code in aac.cc to make it more reusable.
+ uint8 profile = 0;
+ uint8 frequency_index = 0;
+ uint8 channel_config = 0;
+ if (!reader.ReadBits(5, &profile) ||
+ !reader.ReadBits(4, &frequency_index)) {
+ LOG(ERROR) << "Unable to parse AAC header";
+ return false;
+ }
+ if (0xf == frequency_index && !reader.SkipBits(24)) {
+ LOG(ERROR) << "Unable to parse AAC header";
+ return false;
+ }
+ if (!reader.ReadBits(4, &channel_config)) {
+ LOG(ERROR) << "Unable to parse AAC header";
+ return false;
+ }
+
+ if (profile < 1 || profile > 4 || frequency_index == 0xf ||
+ channel_config > 7) {
+ LOG(ERROR) << "Invalid AAC header";
+ return false;
+ }
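+      // Pack a two-byte AudioSpecificConfig: csd[0] holds the 5-bit object
+      // type (profile) and the top 3 bits of the 4-bit frequency index;
+      // csd[1] holds the remaining frequency bit, the 4-bit channel config,
+      // and 3 bits of zero padding.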
+ uint8 csd[2];
+ csd[0] = profile << 3 | frequency_index >> 1;
+ csd[1] = (frequency_index & 0x01) << 7 | channel_config << 3;
+ jobject header = env->NewDirectByteBuffer(csd, 2);
+ Java_MediaCodecBridge_setCodecSpecificData(
+ env, j_format, 0, header);
+ // TODO(qinmin): pass an extra variable to this function to determine
+ // whether we need to call this.
+ Java_MediaCodecBridge_setFrameHasADTSHeader(env, j_format);
+ env->DeleteLocalRef(header);
+ break;
+ }
+ default:
+ LOG(ERROR) << "Invalid header encountered for codec: "
+ << AudioCodecToMimeType(codec);
+ return false;
+ }
+ return true;
+}
+
+void AudioCodecBridge::PlayOutputBuffer(int index, size_t size) {
+ DCHECK_LE(0, index);
+ int numBytes = base::checked_numeric_cast<int>(size);
+ JNIEnv* env = AttachCurrentThread();
+ ScopedJavaLocalRef<jobject> buf =
+ Java_MediaCodecBridge_getOutputBuffer(env, media_codec(), index);
+ uint8* buffer = static_cast<uint8*>(env->GetDirectBufferAddress(buf.obj()));
+
+ ScopedJavaLocalRef<jbyteArray> byte_array =
+ base::android::ToJavaByteArray(env, buffer, numBytes);
+ Java_MediaCodecBridge_playOutputBuffer(
+ env, media_codec(), byte_array.obj());
+}
+
+void AudioCodecBridge::SetVolume(double volume) {
+ JNIEnv* env = AttachCurrentThread();
+ Java_MediaCodecBridge_setVolume(env, media_codec(), volume);
+}
+
+VideoCodecBridge::VideoCodecBridge(const char* mime)
+ : MediaCodecBridge(mime) {
+}
+
+bool VideoCodecBridge::Start(
+ const VideoCodec codec, const gfx::Size& size, jobject surface,
+ jobject media_crypto) {
+ JNIEnv* env = AttachCurrentThread();
+ DCHECK(VideoCodecToMimeType(codec));
+
+ ScopedJavaLocalRef<jstring> j_mime =
+ ConvertUTF8ToJavaString(env, VideoCodecToMimeType(codec));
+ ScopedJavaLocalRef<jobject> j_format(
+ Java_MediaCodecBridge_createVideoFormat(
+ env, j_mime.obj(), size.width(), size.height()));
+ DCHECK(!j_format.is_null());
+ if (!Java_MediaCodecBridge_configureVideo(
+ env, media_codec(), j_format.obj(), surface, media_crypto, 0)) {
+ return false;
+ }
+ StartInternal();
+ return true;
+}
+
+AudioCodecBridge* AudioCodecBridge::Create(const AudioCodec codec) {
+ const char* mime = AudioCodecToMimeType(codec);
+ return mime ? new AudioCodecBridge(mime) : NULL;
+}
+
+VideoCodecBridge* VideoCodecBridge::Create(const VideoCodec codec) {
+ const char* mime = VideoCodecToMimeType(codec);
+ return mime ? new VideoCodecBridge(mime) : NULL;
+}
+
+bool MediaCodecBridge::RegisterMediaCodecBridge(JNIEnv* env) {
+ return RegisterNativesImpl(env);
+}
+
+} // namespace media
+
diff --git a/chromium/media/base/android/media_codec_bridge.h b/chromium/media/base/android/media_codec_bridge.h
new file mode 100644
index 00000000000..3469b1804e7
--- /dev/null
+++ b/chromium/media/base/android/media_codec_bridge.h
@@ -0,0 +1,165 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_ANDROID_MEDIA_CODEC_BRIDGE_H_
+#define MEDIA_BASE_ANDROID_MEDIA_CODEC_BRIDGE_H_
+
+#include <jni.h>
+#include <string>
+
+#include "base/android/scoped_java_ref.h"
+#include "base/time/time.h"
+#include "media/base/audio_decoder_config.h"
+#include "media/base/video_decoder_config.h"
+#include "ui/gfx/size.h"
+
+namespace media {
+
+struct SubsampleEntry;
+
+// This class serves as a bridge for native code to call Java functions in the
+// Android MediaCodec class. For more information on Android MediaCodec, see
+// http://developer.android.com/reference/android/media/MediaCodec.html
+// Note: MediaCodec is only available on JB and greater.
+// Use AudioCodecBridge or VideoCodecBridge to create an instance of this
+// object.
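+//
+// A minimal decode-loop sketch (local data/size/pts variables assumed; error
+// handling and end-of-stream logic omitted):
+//   scoped_ptr<AudioCodecBridge> codec(AudioCodecBridge::Create(kCodecMP3));
+//   codec->Start(kCodecMP3, 44100, 2, NULL, 0, true, NULL);
+//   int input_index = codec->DequeueInputBuffer(kTimeOutNoWait);
+//   codec->QueueInputBuffer(input_index, data, size, pts);
+//   int output_index = codec->DequeueOutputBuffer(
+//       kTimeOutNoWait, &offset, &output_size, &output_pts, &end_of_stream);
+//   if (output_index >= 0)
+//     codec->ReleaseOutputBuffer(output_index, false);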
+class MEDIA_EXPORT MediaCodecBridge {
+ public:
+ enum DequeueBufferInfo {
+ INFO_OUTPUT_BUFFERS_CHANGED = -3,
+ INFO_OUTPUT_FORMAT_CHANGED = -2,
+ INFO_TRY_AGAIN_LATER = -1,
+ INFO_MEDIA_CODEC_ERROR = -1000,
+ };
+
+ static const base::TimeDelta kTimeOutInfinity;
+ static const base::TimeDelta kTimeOutNoWait;
+
+ // Returns true if MediaCodec is available on the device.
+ static bool IsAvailable();
+
+ virtual ~MediaCodecBridge();
+
+  // Resets both input and output; all indices previously returned in calls to
+ // DequeueInputBuffer() and DequeueOutputBuffer() become invalid.
+ // Please note that this clears all the inputs in the media codec. In other
+ // words, there will be no outputs until new input is provided.
+ void Reset();
+
+  // Finishes the decode/encode session. The instance remains active and ready
+  // to be StartAudio/Video()ed again. HOWEVER, due to buggy vendor
+  // implementations (b/8125974), Stop() -> StartAudio/Video() may not work on
+  // some devices. For reliability, Stop() -> delete and recreate a new
+  // instance -> StartAudio/Video() is recommended.
+ void Stop();
+
+  // Used for getting the output format. This is valid after
+  // DequeueOutputBuffer() signals a format change by returning
+  // INFO_OUTPUT_FORMAT_CHANGED.
+ void GetOutputFormat(int* width, int* height);
+
+ // Submits a byte array to the given input buffer. Call this after getting an
+ // available buffer from DequeueInputBuffer(). Returns the number of bytes
+  // put into the input buffer.
+ size_t QueueInputBuffer(int index, const uint8* data, int size,
+ const base::TimeDelta& presentation_time);
+
+ // Similar to the above call, but submits a buffer that is encrypted.
+ size_t QueueSecureInputBuffer(
+ int index, const uint8* data, int data_size,
+ const uint8* key_id, int key_id_size,
+ const uint8* iv, int iv_size,
+ const SubsampleEntry* subsamples, int subsamples_size,
+ const base::TimeDelta& presentation_time);
+
+  // Submits an empty buffer with an EOS (END OF STREAM) flag.
+ void QueueEOS(int input_buffer_index);
+
+ // Returns an index (>=0) of an input buffer to be filled with valid data,
+ // INFO_TRY_AGAIN_LATER if no such buffer is currently available, or
+  // INFO_MEDIA_CODEC_ERROR if an unexpected error happens.
+ // Use kTimeOutInfinity for infinite timeout.
+ int DequeueInputBuffer(base::TimeDelta timeout);
+
+  // Dequeues an output buffer, blocking for at most |timeout|.
+  // Returns the index of an output buffer that has been successfully decoded,
+  // or one of the DequeueBufferInfo values above.
+ // Use kTimeOutInfinity for infinite timeout.
+ int DequeueOutputBuffer(
+ base::TimeDelta timeout, size_t* offset, size_t* size,
+ base::TimeDelta* presentation_time, bool* end_of_stream);
+
+ // Returns the buffer to the codec. If you previously specified a surface
+ // when configuring this video decoder you can optionally render the buffer.
+ void ReleaseOutputBuffer(int index, bool render);
+
+ // Gets output buffers from media codec and keeps them inside the java class.
+ // To access them, use DequeueOutputBuffer().
+ void GetOutputBuffers();
+
+ static bool RegisterMediaCodecBridge(JNIEnv* env);
+
+ protected:
+ explicit MediaCodecBridge(const char* mime);
+
+  // Calls start() on the media codec instance. Used in StartXXX() after
+  // configuring the media codec.
+ void StartInternal();
+
+ jobject media_codec() { return j_media_codec_.obj(); }
+
+ private:
+ // Fills a particular input buffer and returns the size of copied data.
+ size_t FillInputBuffer(int index, const uint8* data, int data_size);
+
+ // Java MediaCodec instance.
+ base::android::ScopedJavaGlobalRef<jobject> j_media_codec_;
+
+ DISALLOW_COPY_AND_ASSIGN(MediaCodecBridge);
+};
+
+class AudioCodecBridge : public MediaCodecBridge {
+ public:
+ // Returns an AudioCodecBridge instance if |codec| is supported, or a NULL
+ // pointer otherwise.
+ static AudioCodecBridge* Create(const AudioCodec codec);
+
+ // Start the audio codec bridge.
+ bool Start(const AudioCodec codec, int sample_rate, int channel_count,
+ const uint8* extra_data, size_t extra_data_size,
+ bool play_audio, jobject media_crypto);
+
+  // Plays the output buffer. This must be called after DequeueOutputBuffer()
+  // and before ReleaseOutputBuffer().
+ void PlayOutputBuffer(int index, size_t size);
+
+ // Set the volume of the audio output.
+ void SetVolume(double volume);
+
+ private:
+ explicit AudioCodecBridge(const char* mime);
+
+ // Configure the java MediaFormat object with the extra codec data passed in.
+ bool ConfigureMediaFormat(jobject j_format, const AudioCodec codec,
+ const uint8* extra_data, size_t extra_data_size);
+};
+
+class MEDIA_EXPORT VideoCodecBridge : public MediaCodecBridge {
+ public:
+  // Returns a VideoCodecBridge instance if |codec| is supported, or a NULL
+ // pointer otherwise.
+ static VideoCodecBridge* Create(const VideoCodec codec);
+
+ // Start the video codec bridge.
+ // TODO(qinmin): Pass codec specific data if available.
+ bool Start(const VideoCodec codec, const gfx::Size& size, jobject surface,
+ jobject media_crypto);
+
+ private:
+ explicit VideoCodecBridge(const char* mime);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_ANDROID_MEDIA_CODEC_BRIDGE_H_
+
diff --git a/chromium/media/base/android/media_codec_bridge_unittest.cc b/chromium/media/base/android/media_codec_bridge_unittest.cc
new file mode 100644
index 00000000000..ee38e6d1a99
--- /dev/null
+++ b/chromium/media/base/android/media_codec_bridge_unittest.cc
@@ -0,0 +1,256 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <string>
+
+#include "base/basictypes.h"
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "media/base/android/media_codec_bridge.h"
+#include "media/base/decoder_buffer.h"
+#include "media/base/test_data_util.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace {
+
+// The first frame of
+// http://www.html5rocks.com/en/tutorials/audio/quick/test.mp3
+unsigned char test_mp3[] = {
+ 0xff, 0xfb, 0xd2, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x69, 0x05, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x0d, 0x20, 0x00, 0x00, 0x00, 0x2a, 0x7e, 0x40,
+ 0xc0, 0x19, 0x4a, 0x80, 0x0d, 0x60, 0x48, 0x1b, 0x40, 0xf7, 0xbd, 0xb9,
+ 0xd9, 0x40, 0x6f, 0x82, 0x01, 0x8b, 0x17, 0xa0, 0x80, 0xc5, 0x01, 0xad,
+ 0x9a, 0xd3, 0x00, 0x12, 0xc0, 0x72, 0x93, 0x67, 0xd0, 0x03, 0x6f, 0xa4,
+ 0xc0, 0xc3, 0x23, 0xee, 0x9b, 0xc0, 0xcc, 0x02, 0xa0, 0xa1, 0x30, 0x0c,
+ 0x52, 0x2d, 0xfd, 0x6e, 0x08, 0x83, 0x60, 0x40, 0x46, 0x06, 0x4b, 0x20,
+ 0x82, 0x82, 0x7f, 0xd4, 0x81, 0xe7, 0x00, 0x64, 0x20, 0x18, 0xec, 0xc2,
+ 0x06, 0x57, 0x0f, 0x81, 0x93, 0x0b, 0x00, 0x66, 0xe3, 0xb7, 0xe8, 0x32,
+ 0x6e, 0xf0, 0x32, 0xb0, 0x58, 0x0c, 0x7c, 0x3a, 0x03, 0x22, 0x14, 0x80,
+ 0xc9, 0x01, 0x80, 0x30, 0x20, 0x14, 0x0c, 0x96, 0x73, 0xfe, 0x9f, 0x6c,
+ 0x0c, 0xd2, 0x25, 0x0f, 0xdc, 0x0c, 0x32, 0x43, 0x03, 0x27, 0x87, 0xc0,
+ 0xc2, 0xc0, 0x20, 0xfc, 0x42, 0xc5, 0xff, 0xff, 0xd4, 0x80, 0x01, 0x01,
+ 0x80, 0xc3, 0x81, 0x01, 0x95, 0x03, 0x28, 0x82, 0xc0, 0xc3, 0x01, 0xa1,
+ 0x06, 0x81, 0x87, 0xc2, 0x40, 0x64, 0xc1, 0xf0, 0x12, 0x02, 0xff, 0xf6,
+ 0x5b, 0x9f, 0x44, 0xdc, 0xdd, 0x0b, 0x38, 0x59, 0xe0, 0x31, 0x71, 0x60,
+ 0x0c, 0xb4, 0x22, 0x03, 0x3b, 0x96, 0x40, 0xc8, 0x63, 0x90, 0x0a, 0x23,
+ 0x81, 0x9e, 0x4c, 0x20, 0x65, 0xb3, 0x18, 0x19, 0x6c, 0x42, 0x06, 0x36,
+ 0x1d, 0x01, 0x90, 0x87, 0xdf, 0xff, 0xd0, 0x65, 0xa6, 0xea, 0x66, 0xfd,
+ 0x40, 0x0c, 0x48, 0x03, 0x1a, 0x09, 0x01, 0x21, 0x98, 0x19, 0x2c, 0x36,
+ 0x06, 0x43, 0x21, 0x81, 0x92, 0xca, 0x60, 0x64, 0x70, 0xb8, 0x19, 0x20,
+ 0x6c, 0x02, 0x83, 0x80, 0xcb, 0x60, 0x65, 0x32, 0x28, 0x18, 0x64, 0x24,
+ 0x06, 0x3a, 0x0c, 0x00, 0xe1, 0x00, 0x18, 0xd0, 0x35, 0xff, 0xff, 0xff,
+ 0xe8, 0x32, 0xef, 0xb2, 0x90, 0x65, 0xbb, 0xdd, 0x94, 0x82, 0x0b, 0x4c,
+ 0xfa, 0x25, 0xf3, 0x74, 0x13, 0x0f, 0xf8, 0x19, 0x28, 0x84, 0x06, 0x36,
+ 0x11, 0x01, 0x20, 0x80, 0x18, 0xb4, 0x52, 0x0e, 0x15, 0x00, 0x30, 0x50,
+ 0x0c, 0x84, 0x32, 0x03, 0x11, 0x04, 0x03, 0x48, 0x04, 0x00, 0x00, 0x31,
+ 0x21, 0x00, 0x0c, 0x84, 0x18, 0x03, 0x07, 0x85, 0x40, 0xc6, 0xa5, 0x70,
+ 0x32, 0xb8, 0x7c, 0x0c, 0x54, 0x04, 0x00, 0xd0, 0x08, 0x59, 0x58, 0x18,
+ 0x20, 0x14, 0x06, 0x30, 0x30, 0x01, 0x9b, 0x86, 0x00, 0x6b, 0x54, 0xa8,
+ 0x19, 0x8c, 0x2a, 0x06, 0x16, 0x09, 0x01, 0xa0, 0xd0, 0xa0, 0x69, 0x74,
+ 0xb8, 0x19, 0xc4, 0x4a, 0xa3, 0xda, 0x9d, 0x1e, 0x4f, 0x05, 0xc0, 0x5b,
+ 0x0b, 0x03, 0xc2, 0x76, 0xa3, 0x4f, 0xb9, 0x16, 0xc2, 0x70, 0x41, 0x07,
+ 0xa0, 0x84, 0x16, 0x38, 0x4a, 0xc8, 0xaf, 0xee, 0x7f, 0x93, 0xb5, 0x5c,
+ 0x39, 0x1e, 0x29, 0xd9, 0x8c, 0x80, 0xb5, 0x80, 0xe6, 0x85, 0xb2, 0x99,
+ 0x68, 0x85, 0x46, 0x91, 0x60, 0xdb, 0x06, 0xfa, 0x38, 0x7a, 0xc7, 0xac,
+ 0x85, 0xa8, 0xd3, 0xe6, 0x99, 0x3b, 0x66, 0x43, 0x23, 0x1f, 0x84, 0xe1,
+ 0x65, 0x5e, 0xbc, 0x84, 0x18, 0x62, 0xe6, 0x42, 0x0b, 0x82, 0xe4, 0xd3,
+ 0x42, 0xd2, 0x05, 0x81, 0x4e, 0xe4, 0x9f, 0x8c, 0xc8, 0x7f, 0xa3, 0xe0,
+ 0x8d, 0xf1, 0x0f, 0x38, 0xe5, 0x3f, 0xc4, 0x2c, 0x24, 0x65, 0x8d, 0xb9,
+ 0x58, 0xac, 0x39, 0x0e, 0x37, 0x99, 0x2e, 0x85, 0xe0, 0xb7, 0x98, 0x41,
+ 0x20, 0x38, 0x1b, 0x95, 0x07, 0xfa, 0xa8, 0x9c, 0x21, 0x0f, 0x13, 0x8c,
+ 0xa5, 0xc1, 0x76, 0xae, 0x0b, 0xc1, 0x30, 0x27, 0x08, 0xc1, 0xf6, 0x4d,
+ 0xce, 0xb4, 0x41, 0x38, 0x1e, 0x82, 0x10, 0x74, 0x45, 0x91, 0x90, 0xff,
+ 0x41, 0x8b, 0x62, 0x1a, 0x71, 0xb6, 0x45, 0x63, 0x8c, 0xce, 0xb8, 0x54,
+ 0x1b, 0xe8, 0x5d, 0x9e, 0x35, 0x9d, 0x6c, 0xac, 0xe8, 0x83, 0xa1, 0xe9,
+ 0x3f, 0x13, 0x74, 0x11, 0x04, 0x10, 0xf1, 0x37, 0x38, 0xc6, 0x00, 0x60,
+ 0x27, 0x48, 0x38, 0x85, 0x92, 0x76, 0xb7, 0xf3, 0xa7, 0x1c, 0x4b, 0xf9,
+ 0x3b, 0x5a, 0x88, 0xac, 0x60, 0x1b, 0x85, 0x81, 0x16, 0xab, 0x44, 0x17,
+ 0x08, 0x2e, 0x0f, 0xd4, 0xe2, 0xde, 0x49, 0xc9, 0xe1, 0xc0, 0xc0, 0xa0,
+ 0x7e, 0x73, 0xa1, 0x67, 0xf8, 0xf5, 0x9f, 0xc4, 0x21, 0x50, 0x4f, 0x05,
+ 0x2c, 0xfc, 0x5c, 0xaa, 0x85, 0xb0, 0xfa, 0x67, 0x80, 0x7e, 0x0f, 0xfd,
+ 0x92, 0x30, 0xd5, 0xa0, 0xd4, 0x05, 0xdd, 0x06, 0x68, 0x1d, 0x6e, 0x4e,
+ 0x8b, 0x79, 0xd6, 0xfc, 0xff, 0x2e, 0x6e, 0x7c, 0xba, 0x03, 0x90, 0xd4,
+ 0x25, 0x65, 0x8e, 0xe7, 0x3a, 0xd1, 0xd6, 0xdc, 0xf0, 0xbe, 0x12, 0xc4,
+ 0x31, 0x08, 0x16, 0x70, 0x31, 0x85, 0x61, 0x38, 0x27, 0x0a, 0x91, 0x5f,
+ 0x03, 0x38, 0xeb, 0x37, 0x13, 0x48, 0x41, 0xbe, 0x7f, 0x04, 0x70, 0x62,
+ 0x2b, 0x15, 0x91, 0x67, 0x63, 0x4f, 0xad, 0xa7, 0x1d, 0x3f, 0x44, 0x17,
+ 0x02, 0x08, 0x0d, 0xf2, 0xfc, 0x03, 0xa0, 0x74, 0x21, 0x8b, 0x07, 0x3a,
+ 0x8d, 0x0f, 0x54, 0x58, 0x94, 0x12, 0xc5, 0x62, 0x18, 0xb9, 0x42, 0xf0,
+ 0x6c, 0x73, 0xa0, 0x92, 0xad, 0x27, 0x1c, 0x20, 0x0f, 0xc1, 0xca, 0x44,
+ 0x87, 0x47, 0xc5, 0x43, 0x23, 0x01, 0xda, 0x23, 0xe2, 0x89, 0x38, 0x9f,
+ 0x1f, 0x8d, 0x8c, 0xc6, 0x95, 0xa3, 0x34, 0x21, 0x21, 0x2d, 0x49, 0xea,
+ 0x4b, 0x05, 0x85, 0xf5, 0x58, 0x25, 0x13, 0xcd, 0x51, 0x19, 0x1a, 0x88,
+ 0xa6, 0x83, 0xd6, 0xd0, 0xbc, 0x25, 0x19, 0x1c, 0x92, 0x12, 0x44, 0x5d,
+ 0x1c, 0x04, 0xf1, 0x99, 0xdf, 0x92, 0x8e, 0x09, 0x85, 0xf3, 0x88, 0x82,
+ 0x4c, 0x22, 0x17, 0xc5, 0x25, 0x23, 0xed, 0x78, 0xf5, 0x41, 0xd1, 0xe9,
+ 0x8a, 0xb3, 0x52, 0xd1, 0x3d, 0x79, 0x81, 0x4d, 0x31, 0x24, 0xf9, 0x38,
+ 0x96, 0xbc, 0xf4, 0x8c, 0x25, 0xe9, 0xf2, 0x73, 0x94, 0x85, 0xc2, 0x61,
+ 0x6a, 0x34, 0x68, 0x65, 0x78, 0x87, 0xa6, 0x4f
+};
+
+} // namespace
+
+namespace media {
+
+static const int kPresentationTimeBase = 100;
+
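+// Decodes |data| repeatedly with advancing presentation timestamps and checks
+// that the timestamps reported for the decoded output never decrease.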
+void DecodeMediaFrame(
+ VideoCodecBridge* media_codec, const uint8* data, size_t data_size,
+ const base::TimeDelta input_presentation_timestamp,
+ const base::TimeDelta initial_timestamp_lower_bound) {
+ base::TimeDelta input_pts = input_presentation_timestamp;
+ base::TimeDelta timestamp = initial_timestamp_lower_bound;
+ base::TimeDelta new_timestamp;
+ for (int i = 0; i < 10; ++i) {
+ int input_buf_index = media_codec->DequeueInputBuffer(
+ MediaCodecBridge::kTimeOutInfinity);
+    media_codec->QueueInputBuffer(
+        input_buf_index, data, data_size, input_pts);
+ size_t unused_offset = 0;
+ size_t size = 0;
+ bool eos = false;
+ int output_buf_index = media_codec->DequeueOutputBuffer(
+ MediaCodecBridge::kTimeOutInfinity,
+ &unused_offset, &size, &new_timestamp, &eos);
+    if (output_buf_index >= 0)
+      media_codec->ReleaseOutputBuffer(output_buf_index, false);
+    // The output timestamp should never decrease.
+    ASSERT_TRUE(new_timestamp >= timestamp);
+ input_pts += base::TimeDelta::FromMicroseconds(33000);
+ timestamp = new_timestamp;
+ }
+}
+
+TEST(MediaCodecBridgeTest, Initialize) {
+ if (!MediaCodecBridge::IsAvailable())
+ return;
+
+ scoped_ptr<media::MediaCodecBridge> media_codec;
+ media_codec.reset(VideoCodecBridge::Create(kCodecH264));
+}
+
+TEST(MediaCodecBridgeTest, DoNormal) {
+ if (!MediaCodecBridge::IsAvailable())
+ return;
+
+ scoped_ptr<media::AudioCodecBridge> media_codec;
+ media_codec.reset(AudioCodecBridge::Create(kCodecMP3));
+
+ media_codec->Start(kCodecMP3, 44100, 2, NULL, 0, false, NULL);
+
+ int input_buf_index = media_codec->DequeueInputBuffer(
+ MediaCodecBridge::kTimeOutInfinity);
+ ASSERT_GE(input_buf_index, 0);
+
+ int64 input_pts = kPresentationTimeBase;
+ media_codec->QueueInputBuffer(
+ input_buf_index, test_mp3, sizeof(test_mp3),
+ base::TimeDelta::FromMicroseconds(++input_pts));
+
+ input_buf_index = media_codec->DequeueInputBuffer(
+ MediaCodecBridge::kTimeOutInfinity);
+ media_codec->QueueInputBuffer(
+ input_buf_index, test_mp3, sizeof(test_mp3),
+ base::TimeDelta::FromMicroseconds(++input_pts));
+
+ input_buf_index = media_codec->DequeueInputBuffer(
+ MediaCodecBridge::kTimeOutInfinity);
+ media_codec->QueueEOS(input_buf_index);
+
+ input_pts = kPresentationTimeBase;
+ bool eos = false;
+ while (!eos) {
+ size_t unused_offset = 0;
+ size_t size = 0;
+ base::TimeDelta timestamp;
+ int output_buf_index = media_codec->DequeueOutputBuffer(
+ MediaCodecBridge::kTimeOutInfinity,
+ &unused_offset, &size, &timestamp, &eos);
+ switch (output_buf_index) {
+ case MediaCodecBridge::INFO_TRY_AGAIN_LATER:
+ FAIL();
+ return;
+
+ case MediaCodecBridge::INFO_OUTPUT_FORMAT_CHANGED:
+ continue;
+
+ case MediaCodecBridge::INFO_OUTPUT_BUFFERS_CHANGED:
+ media_codec->GetOutputBuffers();
+ continue;
+ }
+ EXPECT_LE(1u, size);
+ if (!eos)
+ EXPECT_EQ(++input_pts, timestamp.InMicroseconds());
+ ASSERT_LE(input_pts, kPresentationTimeBase + 2);
+ }
+ ASSERT_EQ(input_pts, kPresentationTimeBase + 2);
+}
+
+TEST(MediaCodecBridgeTest, InvalidVorbisHeader) {
+ if (!MediaCodecBridge::IsAvailable())
+ return;
+
+ scoped_ptr<media::AudioCodecBridge> media_codec;
+ media_codec.reset(AudioCodecBridge::Create(kCodecVorbis));
+
+ // The first byte of the header is not 0x02.
+ uint8 invalid_first_byte[] = { 0x00, 0xff, 0xff, 0xff, 0xff };
+ EXPECT_FALSE(media_codec->Start(
+ kCodecVorbis, 44100, 2, invalid_first_byte, sizeof(invalid_first_byte),
+ false, NULL));
+
+  // Size of the header does not match the data we passed in.
+ uint8 invalid_size[] = { 0x02, 0x01, 0xff, 0x01, 0xff };
+ EXPECT_FALSE(media_codec->Start(
+ kCodecVorbis, 44100, 2, invalid_size, sizeof(invalid_size), false, NULL));
+
+ // Size of the header is too large.
+ size_t large_size = 8 * 1024 * 1024 + 2;
+ uint8* very_large_header = new uint8[large_size];
+ very_large_header[0] = 0x02;
+ for (size_t i = 1; i < large_size - 1; ++i)
+ very_large_header[i] = 0xff;
+ very_large_header[large_size - 1] = 0xfe;
+ EXPECT_FALSE(media_codec->Start(
+ kCodecVorbis, 44100, 2, very_large_header, 0x80000000, false, NULL));
+ delete[] very_large_header;
+}
+
+TEST(MediaCodecBridgeTest, PresentationTimestampsDoNotDecrease) {
+ if (!MediaCodecBridge::IsAvailable())
+ return;
+
+ scoped_ptr<VideoCodecBridge> media_codec;
+ media_codec.reset(VideoCodecBridge::Create(kCodecVP8));
+ EXPECT_TRUE(media_codec->Start(
+ kCodecVP8, gfx::Size(320, 240), NULL, NULL));
+ scoped_refptr<DecoderBuffer> buffer =
+ ReadTestDataFile("vp8-I-frame-320x240");
+ DecodeMediaFrame(
+ media_codec.get(), buffer->data(), buffer->data_size(),
+ base::TimeDelta(), base::TimeDelta());
+
+ // Simulate a seek to 10 seconds, and each chunk has 2 I-frames.
+ std::vector<uint8> chunk(buffer->data(),
+ buffer->data() + buffer->data_size());
+ chunk.insert(chunk.end(), buffer->data(),
+ buffer->data() + buffer->data_size());
+ media_codec->Reset();
+ DecodeMediaFrame(media_codec.get(), &chunk[0], chunk.size(),
+ base::TimeDelta::FromMicroseconds(10000000),
+ base::TimeDelta::FromMicroseconds(9900000));
+
+ // Simulate a seek to 5 seconds.
+ media_codec->Reset();
+ DecodeMediaFrame(media_codec.get(), &chunk[0], chunk.size(),
+ base::TimeDelta::FromMicroseconds(5000000),
+ base::TimeDelta::FromMicroseconds(4900000));
+}
+
+TEST(MediaCodecBridgeTest, CreateUnsupportedCodec) {
+ EXPECT_EQ(NULL, AudioCodecBridge::Create(kUnknownAudioCodec));
+ EXPECT_EQ(NULL, VideoCodecBridge::Create(kUnknownVideoCodec));
+}
+
+} // namespace media
diff --git a/chromium/media/base/android/media_drm_bridge.cc b/chromium/media/base/android/media_drm_bridge.cc
new file mode 100644
index 00000000000..9ac62ca6f48
--- /dev/null
+++ b/chromium/media/base/android/media_drm_bridge.cc
@@ -0,0 +1,244 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/android/media_drm_bridge.h"
+
+#include "base/android/build_info.h"
+#include "base/android/jni_array.h"
+#include "base/android/jni_string.h"
+#include "base/logging.h"
+#include "jni/MediaDrmBridge_jni.h"
+#include "media/base/android/media_player_manager.h"
+
+using base::android::AttachCurrentThread;
+using base::android::ConvertUTF8ToJavaString;
+using base::android::ConvertJavaStringToUTF8;
+using base::android::JavaByteArrayToByteVector;
+using base::android::ScopedJavaLocalRef;
+
+namespace media {
+
+static uint32 ReadUint32(const uint8* data) {
+ uint32 value = 0;
+ for (int i = 0; i < 4; ++i)
+ value = (value << 8) | data[i];
+ return value;
+}
+
+static uint64 ReadUint64(const uint8* data) {
+ uint64 value = 0;
+ for (int i = 0; i < 8; ++i)
+ value = (value << 8) | data[i];
+ return value;
+}
+
+// The structure of an ISO CENC Protection System Specific Header (PSSH) box is
+// as follows. (See ISO/IEC FDIS 23001-7:2011(E).)
+// Note: ISO boxes use big-endian values.
+//
+// PSSH {
+// uint32 Size
+// uint32 Type
+// uint64 LargeSize # Field is only present if value(Size) == 1.
+// uint32 VersionAndFlags
+// uint8[16] SystemId
+// uint32 DataSize
+// uint8[DataSize] Data
+// }
+static const int kBoxHeaderSize = 8; // Box's header contains Size and Type.
+static const int kBoxLargeSizeSize = 8;
+static const int kPsshVersionFlagSize = 4;
+static const int kPsshSystemIdSize = 16;
+static const int kPsshDataSizeSize = 4;
+static const uint32 kTencType = 0x74656e63;
+static const uint32 kPsshType = 0x70737368;
+
+// Tries to find a PSSH box whose "SystemId" is |uuid| in |data|, parses the
+// "Data" of the box and puts it in |pssh_data|. Returns true if such a box is
+// found and successfully parsed. Returns false otherwise.
+// Notes:
+// 1. If multiple PSSH boxes are found, the "Data" of the first matching PSSH
+//    box will be set in |pssh_data|.
+// 2. Only PSSH and TENC boxes are allowed in |data|. TENC boxes are skipped.
+static bool GetPsshData(const uint8* data, int data_size,
+ const std::vector<uint8>& uuid,
+ std::vector<uint8>* pssh_data) {
+ const uint8* cur = data;
+ const uint8* data_end = data + data_size;
+ int bytes_left = data_size;
+
+ while (bytes_left > 0) {
+ const uint8* box_head = cur;
+
+ if (bytes_left < kBoxHeaderSize)
+ return false;
+
+    uint64 box_size = ReadUint32(cur);
+ uint32 type = ReadUint32(cur + 4);
+ cur += kBoxHeaderSize;
+ bytes_left -= kBoxHeaderSize;
+
+ if (box_size == 1) { // LargeSize is present.
+ if (bytes_left < kBoxLargeSizeSize)
+ return false;
+
+ box_size = ReadUint64(cur);
+ cur += kBoxLargeSizeSize;
+ bytes_left -= kBoxLargeSizeSize;
+ } else if (box_size == 0) {
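+      // Per ISO BMFF, a size of 0 means the box extends to the end of |data|.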
+ box_size = bytes_left + kBoxHeaderSize;
+ }
+
+ const uint8* box_end = box_head + box_size;
+ if (data_end < box_end)
+ return false;
+
+ if (type == kTencType) {
+ // Skip 'tenc' box.
+ cur = box_end;
+ bytes_left = data_end - cur;
+ continue;
+ } else if (type != kPsshType) {
+ return false;
+ }
+
+ const int kPsshBoxMinimumSize =
+ kPsshVersionFlagSize + kPsshSystemIdSize + kPsshDataSizeSize;
+ if (box_end < cur + kPsshBoxMinimumSize)
+ return false;
+
+ uint32 version_and_flags = ReadUint32(cur);
+ cur += kPsshVersionFlagSize;
+ bytes_left -= kPsshVersionFlagSize;
+ if (version_and_flags != 0)
+ return false;
+
+ DCHECK_GE(bytes_left, kPsshSystemIdSize);
+ if (!std::equal(uuid.begin(), uuid.end(), cur)) {
+ cur = box_end;
+ bytes_left = data_end - cur;
+ continue;
+ }
+
+ cur += kPsshSystemIdSize;
+ bytes_left -= kPsshSystemIdSize;
+
+ uint32 data_size = ReadUint32(cur);
+ cur += kPsshDataSizeSize;
+ bytes_left -= kPsshDataSizeSize;
+
+ if (box_end < cur + data_size)
+ return false;
+
+ pssh_data->assign(cur, cur + data_size);
+ return true;
+ }
+
+ return false;
+}
+
+// static
+MediaDrmBridge* MediaDrmBridge::Create(int media_keys_id,
+ const std::vector<uint8>& scheme_uuid,
+ MediaPlayerManager* manager) {
+ if (!IsAvailable() || scheme_uuid.empty())
+ return NULL;
+
+ // TODO(qinmin): check whether the uuid is valid.
+ return new MediaDrmBridge(media_keys_id, scheme_uuid, manager);
+}
+
+bool MediaDrmBridge::IsAvailable() {
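+  // MediaDrm is only available on JB MR2 (API 18) and greater.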
+ return base::android::BuildInfo::GetInstance()->sdk_int() >= 18;
+}
+
+bool MediaDrmBridge::RegisterMediaDrmBridge(JNIEnv* env) {
+ return RegisterNativesImpl(env);
+}
+
+MediaDrmBridge::MediaDrmBridge(int media_keys_id,
+ const std::vector<uint8>& scheme_uuid,
+ MediaPlayerManager* manager)
+ : media_keys_id_(media_keys_id),
+ scheme_uuid_(scheme_uuid),
+ manager_(manager) {
+ JNIEnv* env = AttachCurrentThread();
+ CHECK(env);
+
+ ScopedJavaLocalRef<jbyteArray> j_scheme_uuid =
+ base::android::ToJavaByteArray(env, &scheme_uuid[0], scheme_uuid.size());
+ j_media_drm_.Reset(Java_MediaDrmBridge_create(
+ env, j_scheme_uuid.obj(), reinterpret_cast<intptr_t>(this)));
+}
+
+MediaDrmBridge::~MediaDrmBridge() {
+ JNIEnv* env = AttachCurrentThread();
+ Java_MediaDrmBridge_release(env, j_media_drm_.obj());
+}
+
+bool MediaDrmBridge::GenerateKeyRequest(const std::string& type,
+ const uint8* init_data,
+ int init_data_length) {
+ std::vector<uint8> pssh_data;
+ if (!GetPsshData(init_data, init_data_length, scheme_uuid_, &pssh_data))
+ return false;
+
+ JNIEnv* env = AttachCurrentThread();
+ ScopedJavaLocalRef<jbyteArray> j_pssh_data =
+ base::android::ToJavaByteArray(env, &pssh_data[0], pssh_data.size());
+ ScopedJavaLocalRef<jstring> j_mime = ConvertUTF8ToJavaString(env, type);
+ Java_MediaDrmBridge_generateKeyRequest(
+ env, j_media_drm_.obj(), j_pssh_data.obj(), j_mime.obj());
+ return true;
+}
+
+void MediaDrmBridge::AddKey(const uint8* key, int key_length,
+ const uint8* init_data, int init_data_length,
+ const std::string& session_id) {
+ JNIEnv* env = AttachCurrentThread();
+ ScopedJavaLocalRef<jbyteArray> j_key_data =
+ base::android::ToJavaByteArray(env, key, key_length);
+ ScopedJavaLocalRef<jstring> j_session_id =
+ ConvertUTF8ToJavaString(env, session_id);
+ Java_MediaDrmBridge_addKey(
+ env, j_media_drm_.obj(), j_session_id.obj(), j_key_data.obj());
+}
+
+ScopedJavaLocalRef<jobject> MediaDrmBridge::GetMediaCrypto() {
+ JNIEnv* env = AttachCurrentThread();
+ return Java_MediaDrmBridge_getMediaCrypto(env, j_media_drm_.obj());
+}
+
+void MediaDrmBridge::CancelKeyRequest(const std::string& session_id) {
+ JNIEnv* env = AttachCurrentThread();
+ ScopedJavaLocalRef<jstring> j_session_id =
+ ConvertUTF8ToJavaString(env, session_id);
+ Java_MediaDrmBridge_cancelKeyRequest(
+ env, j_media_drm_.obj(), j_session_id.obj());
+}
+
+void MediaDrmBridge::OnKeyMessage(JNIEnv* env,
+ jobject j_media_drm,
+ jstring j_session_id,
+ jbyteArray j_message,
+ jstring j_destination_url) {
+ std::string session_id = ConvertJavaStringToUTF8(env, j_session_id);
+ std::vector<uint8> message;
+ JavaByteArrayToByteVector(env, j_message, &message);
+ std::string destination_url = ConvertJavaStringToUTF8(env, j_destination_url);
+
+ manager_->OnKeyMessage(media_keys_id_, session_id, message, destination_url);
+}
+
+void MediaDrmBridge::OnKeyAdded(JNIEnv* env, jobject, jstring j_session_id) {
+ std::string session_id = ConvertJavaStringToUTF8(env, j_session_id);
+ manager_->OnKeyAdded(media_keys_id_, session_id);
+}
+
+void MediaDrmBridge::OnKeyError(JNIEnv* env, jobject, jstring j_session_id) {
+ std::string session_id = ConvertJavaStringToUTF8(env, j_session_id);
+ manager_->OnKeyError(media_keys_id_, session_id, MediaKeys::kUnknownError, 0);
+}
+
+} // namespace media
diff --git a/chromium/media/base/android/media_drm_bridge.h b/chromium/media/base/android/media_drm_bridge.h
new file mode 100644
index 00000000000..26e64372684
--- /dev/null
+++ b/chromium/media/base/android/media_drm_bridge.h
@@ -0,0 +1,83 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_ANDROID_MEDIA_DRM_BRIDGE_H_
+#define MEDIA_BASE_ANDROID_MEDIA_DRM_BRIDGE_H_
+
+#include <jni.h>
+#include <string>
+#include <vector>
+
+#include "base/android/scoped_java_ref.h"
+#include "media/base/media_export.h"
+#include "media/base/media_keys.h"
+
+namespace media {
+
+class MediaPlayerManager;
+
+// This class provides DRM services for android EME implementation.
+// TODO(qinmin): implement all the functions in this class.
+class MEDIA_EXPORT MediaDrmBridge : public MediaKeys {
+ public:
+ virtual ~MediaDrmBridge();
+
+ // Returns a MediaDrmBridge instance if |scheme_uuid| is supported, or a NULL
+ // pointer otherwise.
+ static MediaDrmBridge* Create(int media_keys_id,
+ const std::vector<uint8>& scheme_uuid,
+ MediaPlayerManager* manager);
+
+ // Checks whether MediaDRM is available.
+ static bool IsAvailable();
+
+ static bool RegisterMediaDrmBridge(JNIEnv* env);
+
+ // MediaKeys implementations.
+ virtual bool GenerateKeyRequest(const std::string& type,
+ const uint8* init_data,
+ int init_data_length) OVERRIDE;
+ virtual void AddKey(const uint8* key, int key_length,
+ const uint8* init_data, int init_data_length,
+ const std::string& session_id) OVERRIDE;
+ virtual void CancelKeyRequest(const std::string& session_id) OVERRIDE;
+
+ // Called after we got the response for GenerateKeyRequest().
+ void OnKeyMessage(JNIEnv* env, jobject, jstring j_session_id,
+ jbyteArray message, jstring destination_url);
+
+ // Called when key is added.
+ void OnKeyAdded(JNIEnv* env, jobject, jstring j_session_id);
+
+ // Called when error happens.
+ void OnKeyError(JNIEnv* env, jobject, jstring j_session_id);
+
+ // Methods to create and release a MediaCrypto object.
+ base::android::ScopedJavaLocalRef<jobject> GetMediaCrypto();
+
+ int media_keys_id() const { return media_keys_id_; }
+
+ private:
+ MediaDrmBridge(int media_keys_id,
+ const std::vector<uint8>& scheme_uuid,
+ MediaPlayerManager* manager);
+
+ // ID of the MediaKeys object.
+ int media_keys_id_;
+
+ // UUID of the key system.
+ std::vector<uint8> scheme_uuid_;
+
+ // Java MediaDrm instance.
+ base::android::ScopedJavaGlobalRef<jobject> j_media_drm_;
+
+ // Non-owned pointer.
+ MediaPlayerManager* manager_;
+
+ DISALLOW_COPY_AND_ASSIGN(MediaDrmBridge);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_ANDROID_MEDIA_DRM_BRIDGE_H_
diff --git a/chromium/media/base/android/media_jni_registrar.cc b/chromium/media/base/android/media_jni_registrar.cc
new file mode 100644
index 00000000000..b7d48ca0737
--- /dev/null
+++ b/chromium/media/base/android/media_jni_registrar.cc
@@ -0,0 +1,43 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/android/media_jni_registrar.h"
+
+#include "base/basictypes.h"
+#include "base/android/jni_android.h"
+#include "base/android/jni_registrar.h"
+
+#include "media/audio/android/audio_manager_android.h"
+#include "media/base/android/media_codec_bridge.h"
+#include "media/base/android/media_drm_bridge.h"
+#include "media/base/android/media_player_bridge.h"
+#include "media/base/android/media_player_listener.h"
+#include "media/base/android/webaudio_media_codec_bridge.h"
+#include "media/video/capture/android/video_capture_device_android.h"
+
+namespace media {
+
+static base::android::RegistrationMethod kMediaRegisteredMethods[] = {
+ { "AudioManagerAndroid",
+ AudioManagerAndroid::RegisterAudioManager },
+ { "MediaCodecBridge",
+ MediaCodecBridge::RegisterMediaCodecBridge },
+ { "MediaDrmBridge",
+ MediaDrmBridge::RegisterMediaDrmBridge },
+ { "MediaPlayerBridge",
+ MediaPlayerBridge::RegisterMediaPlayerBridge },
+ { "MediaPlayerListener",
+ MediaPlayerListener::RegisterMediaPlayerListener },
+ { "VideoCaptureDevice",
+ VideoCaptureDeviceAndroid::RegisterVideoCaptureDevice },
+ { "WebAudioMediaCodecBridge",
+ WebAudioMediaCodecBridge::RegisterWebAudioMediaCodecBridge },
+};
+
+bool RegisterJni(JNIEnv* env) {
+ return base::android::RegisterNativeMethods(
+ env, kMediaRegisteredMethods, arraysize(kMediaRegisteredMethods));
+}
+
+} // namespace media
diff --git a/chromium/media/base/android/media_jni_registrar.h b/chromium/media/base/android/media_jni_registrar.h
new file mode 100644
index 00000000000..7e937028f81
--- /dev/null
+++ b/chromium/media/base/android/media_jni_registrar.h
@@ -0,0 +1,19 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_ANDROID_MEDIA_JNI_REGISTRAR_H_
+#define MEDIA_BASE_ANDROID_MEDIA_JNI_REGISTRAR_H_
+
+#include <jni.h>
+
+#include "media/base/media_export.h"
+
+namespace media {
+
+// Register all JNI bindings necessary for media.
+MEDIA_EXPORT bool RegisterJni(JNIEnv* env);
+
+} // namespace media
+
+#endif // MEDIA_BASE_ANDROID_MEDIA_JNI_REGISTRAR_H_
diff --git a/chromium/media/base/android/media_player_android.cc b/chromium/media/base/android/media_player_android.cc
new file mode 100644
index 00000000000..c0055069e69
--- /dev/null
+++ b/chromium/media/base/android/media_player_android.cc
@@ -0,0 +1,97 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/android/media_player_android.h"
+
+#include "base/logging.h"
+#include "media/base/android/media_drm_bridge.h"
+#include "media/base/android/media_player_manager.h"
+
+namespace media {
+
+MediaPlayerAndroid::MediaPlayerAndroid(
+ int player_id,
+ MediaPlayerManager* manager)
+ : player_id_(player_id),
+ manager_(manager) {
+}
+
+MediaPlayerAndroid::~MediaPlayerAndroid() {}
+
+void MediaPlayerAndroid::OnMediaError(int error_type) {
+ manager_->OnError(player_id_, error_type);
+}
+
+void MediaPlayerAndroid::OnVideoSizeChanged(int width, int height) {
+ manager_->OnVideoSizeChanged(player_id_, width, height);
+}
+
+void MediaPlayerAndroid::OnBufferingUpdate(int percent) {
+ manager_->OnBufferingUpdate(player_id_, percent);
+}
+
+void MediaPlayerAndroid::OnPlaybackComplete() {
+ manager_->OnPlaybackComplete(player_id_);
+}
+
+void MediaPlayerAndroid::OnMediaInterrupted() {
+ manager_->OnMediaInterrupted(player_id_);
+}
+
+void MediaPlayerAndroid::OnSeekComplete() {
+ manager_->OnSeekComplete(player_id_, GetCurrentTime());
+}
+
+void MediaPlayerAndroid::OnTimeUpdated() {
+ manager_->OnTimeUpdate(player_id_, GetCurrentTime());
+}
+
+void MediaPlayerAndroid::OnMediaMetadataChanged(
+ base::TimeDelta duration, int width, int height, bool success) {
+ manager_->OnMediaMetadataChanged(
+ player_id_, duration, width, height, success);
+}
+
+void MediaPlayerAndroid::RequestMediaResourcesFromManager() {
+ if (manager_)
+ manager_->RequestMediaResources(player_id_);
+}
+
+void MediaPlayerAndroid::ReleaseMediaResourcesFromManager() {
+ if (manager_)
+ manager_->ReleaseMediaResources(player_id_);
+}
+
+void MediaPlayerAndroid::DemuxerReady(
+ const MediaPlayerHostMsg_DemuxerReady_Params& params) {
+ NOTREACHED() << "Unexpected ipc received";
+}
+
+void MediaPlayerAndroid::ReadFromDemuxerAck(
+ const MediaPlayerHostMsg_ReadFromDemuxerAck_Params& params) {
+ NOTREACHED() << "Unexpected ipc received";
+}
+
+void MediaPlayerAndroid::OnSeekRequestAck(unsigned seek_request_id) {
+ NOTREACHED() << "Unexpected ipc received";
+}
+
+void MediaPlayerAndroid::DurationChanged(const base::TimeDelta& duration) {
+ NOTREACHED() << "Unexpected ipc received";
+}
+
+GURL MediaPlayerAndroid::GetUrl() {
+ return GURL();
+}
+
+GURL MediaPlayerAndroid::GetFirstPartyForCookies() {
+ return GURL();
+}
+
+void MediaPlayerAndroid::SetDrmBridge(MediaDrmBridge* drm_bridge) {
+ // Not all players support DrmBridge. Do nothing by default.
+ return;
+}
+
+} // namespace media
diff --git a/chromium/media/base/android/media_player_android.h b/chromium/media/base/android/media_player_android.h
new file mode 100644
index 00000000000..f1c9c37ee09
--- /dev/null
+++ b/chromium/media/base/android/media_player_android.h
@@ -0,0 +1,141 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_ANDROID_MEDIA_PLAYER_ANDROID_H_
+#define MEDIA_BASE_ANDROID_MEDIA_PLAYER_ANDROID_H_
+
+#include <jni.h>
+#include <string>
+
+#include "base/callback.h"
+#include "base/time/time.h"
+#include "media/base/android/demuxer_stream_player_params.h"
+#include "media/base/media_export.h"
+#include "ui/gl/android/scoped_java_surface.h"
+#include "url/gurl.h"
+
+namespace media {
+
+class MediaDrmBridge;
+class MediaPlayerManager;
+
+// This class serves as the base class for different media player
+// implementations on Android. Subclasses need to provide their own
+// MediaPlayerAndroid::Create() implementation.
+class MEDIA_EXPORT MediaPlayerAndroid {
+ public:
+ virtual ~MediaPlayerAndroid();
+
+ // Error types for MediaErrorCB.
+ enum MediaErrorType {
+ MEDIA_ERROR_FORMAT,
+ MEDIA_ERROR_DECODE,
+ MEDIA_ERROR_NOT_VALID_FOR_PROGRESSIVE_PLAYBACK,
+ MEDIA_ERROR_INVALID_CODE,
+ };
+
+ // Types of media source that this object will play.
+ enum SourceType {
+ SOURCE_TYPE_URL,
+ SOURCE_TYPE_MSE, // W3C Media Source Extensions
+ SOURCE_TYPE_STREAM, // W3C Media Stream, e.g. getUserMedia().
+ };
+
+ // Construct a MediaPlayerAndroid object with all the needed media player
+ // callbacks. This object needs to call |manager_|'s RequestMediaResources()
+ // before decoding the media stream. This allows |manager_| to track
+ // unused resources and free them when needed. On the other hand, it needs
+ // to call ReleaseMediaResources() when it is done with decoding.
+ static MediaPlayerAndroid* Create(int player_id,
+ const GURL& url,
+ SourceType source_type,
+ const GURL& first_party_for_cookies,
+ bool hide_url_log,
+ MediaPlayerManager* manager);
+
+ // Passing an external java surface object to the player.
+ virtual void SetVideoSurface(gfx::ScopedJavaSurface surface) = 0;
+
+ // Start playing the media.
+ virtual void Start() = 0;
+
+ // Pause the media.
+ virtual void Pause() = 0;
+
+ // Seek to a particular position. When succeeds, OnSeekComplete() will be
+ // called. Otherwise, nothing will happen.
+ virtual void SeekTo(base::TimeDelta time) = 0;
+
+ // Release the player resources.
+ virtual void Release() = 0;
+
+ // Set the player volume.
+ virtual void SetVolume(double volume) = 0;
+
+ // Get the media information from the player.
+ virtual int GetVideoWidth() = 0;
+ virtual int GetVideoHeight() = 0;
+ virtual base::TimeDelta GetDuration() = 0;
+ virtual base::TimeDelta GetCurrentTime() = 0;
+ virtual bool IsPlaying() = 0;
+ virtual bool IsPlayerReady() = 0;
+ virtual bool CanPause() = 0;
+ virtual bool CanSeekForward() = 0;
+ virtual bool CanSeekBackward() = 0;
+ virtual GURL GetUrl();
+ virtual GURL GetFirstPartyForCookies();
+
+ // Methods for DemuxerStreamPlayer.
+ // Informs DemuxerStreamPlayer that the demuxer is ready.
+ virtual void DemuxerReady(
+ const MediaPlayerHostMsg_DemuxerReady_Params& params);
+ // Called when the requested data is received from the demuxer.
+ virtual void ReadFromDemuxerAck(
+ const MediaPlayerHostMsg_ReadFromDemuxerAck_Params& params);
+
+ // Called when a seek request is acked by the render process.
+ virtual void OnSeekRequestAck(unsigned seek_request_id);
+
+ // Called when the demuxer has changed the duration.
+ virtual void DurationChanged(const base::TimeDelta& duration);
+
+ // Pass a drm bridge to a player.
+ virtual void SetDrmBridge(MediaDrmBridge* drm_bridge);
+
+ int player_id() { return player_id_; }
+
+ protected:
+ MediaPlayerAndroid(int player_id,
+ MediaPlayerManager* manager);
+
+ // Called when player status changes.
+ virtual void OnMediaError(int error_type);
+ virtual void OnVideoSizeChanged(int width, int height);
+ virtual void OnBufferingUpdate(int percent);
+ virtual void OnPlaybackComplete();
+ virtual void OnSeekComplete();
+ virtual void OnMediaMetadataChanged(
+ base::TimeDelta duration, int width, int height, bool success);
+ virtual void OnMediaInterrupted();
+ virtual void OnTimeUpdated();
+
+ // Request or release decoding resources from |manager_|.
+ virtual void RequestMediaResourcesFromManager();
+ virtual void ReleaseMediaResourcesFromManager();
+
+ MediaPlayerManager* manager() { return manager_; }
+
+ private:
+ // Player ID assigned to this player.
+ int player_id_;
+
+ // Resource manager for all the media players.
+ MediaPlayerManager* manager_;
+
+ DISALLOW_COPY_AND_ASSIGN(MediaPlayerAndroid);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_ANDROID_MEDIA_PLAYER_ANDROID_H_
diff --git a/chromium/media/base/android/media_player_bridge.cc b/chromium/media/base/android/media_player_bridge.cc
new file mode 100644
index 00000000000..342ceaa7902
--- /dev/null
+++ b/chromium/media/base/android/media_player_bridge.cc
@@ -0,0 +1,441 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/android/media_player_bridge.h"
+
+#include "base/android/jni_android.h"
+#include "base/android/jni_string.h"
+#include "base/basictypes.h"
+#include "base/logging.h"
+#include "base/message_loop/message_loop_proxy.h"
+#include "jni/MediaPlayerBridge_jni.h"
+#include "media/base/android/media_player_manager.h"
+#include "media/base/android/media_resource_getter.h"
+#include "media/base/android/media_source_player.h"
+
+using base::android::ConvertUTF8ToJavaString;
+using base::android::ScopedJavaLocalRef;
+
+// Time update happens every 250ms.
+static const int kTimeUpdateInterval = 250;
+
+// Android MediaMetadataRetriever may fail to extract the metadata from the
+// media under some circumstances. This makes the user unable to perform
+// seek. To solve this problem, we use a temporary duration of 100 seconds when
+// the duration is unknown. And we scale the seek position later when duration
+// is available.
+static const int kTemporaryDuration = 100;
+
+namespace media {
+
+#if !defined(GOOGLE_TV)
+// static
+MediaPlayerAndroid* MediaPlayerAndroid::Create(
+ int player_id,
+ const GURL& url,
+ SourceType source_type,
+ const GURL& first_party_for_cookies,
+ bool hide_url_log,
+ MediaPlayerManager* manager) {
+ if (source_type == SOURCE_TYPE_URL) {
+ MediaPlayerBridge* media_player_bridge = new MediaPlayerBridge(
+ player_id,
+ url,
+ first_party_for_cookies,
+ hide_url_log,
+ manager);
+ media_player_bridge->Initialize();
+ return media_player_bridge;
+ } else {
+ return new MediaSourcePlayer(
+ player_id,
+ manager);
+ }
+}
+#endif
+
+MediaPlayerBridge::MediaPlayerBridge(
+ int player_id,
+ const GURL& url,
+ const GURL& first_party_for_cookies,
+ bool hide_url_log,
+ MediaPlayerManager* manager)
+ : MediaPlayerAndroid(player_id,
+ manager),
+ prepared_(false),
+ pending_play_(false),
+ url_(url),
+ first_party_for_cookies_(first_party_for_cookies),
+ hide_url_log_(hide_url_log),
+ duration_(base::TimeDelta::FromSeconds(kTemporaryDuration)),
+ width_(0),
+ height_(0),
+ can_pause_(true),
+ can_seek_forward_(true),
+ can_seek_backward_(true),
+ weak_this_(this),
+ listener_(base::MessageLoopProxy::current(),
+ weak_this_.GetWeakPtr()) {
+}
+
+MediaPlayerBridge::~MediaPlayerBridge() {
+ Release();
+}
+
+void MediaPlayerBridge::Initialize() {
+ if (url_.SchemeIsFile()) {
+ cookies_.clear();
+ ExtractMediaMetadata(url_.spec());
+ return;
+ }
+
+ media::MediaResourceGetter* resource_getter =
+ manager()->GetMediaResourceGetter();
+ if (url_.SchemeIsFileSystem()) {
+ cookies_.clear();
+ resource_getter->GetPlatformPathFromFileSystemURL(url_, base::Bind(
+ &MediaPlayerBridge::ExtractMediaMetadata, weak_this_.GetWeakPtr()));
+ return;
+ }
+
+ resource_getter->GetCookies(url_, first_party_for_cookies_, base::Bind(
+ &MediaPlayerBridge::OnCookiesRetrieved, weak_this_.GetWeakPtr()));
+}
+
+void MediaPlayerBridge::CreateJavaMediaPlayerBridge() {
+ JNIEnv* env = base::android::AttachCurrentThread();
+ CHECK(env);
+
+ j_media_player_bridge_.Reset(Java_MediaPlayerBridge_create(env));
+
+ SetMediaPlayerListener();
+}
+
+void MediaPlayerBridge::SetJavaMediaPlayerBridge(
+ jobject j_media_player_bridge) {
+ JNIEnv* env = base::android::AttachCurrentThread();
+ CHECK(env);
+
+ j_media_player_bridge_.Reset(env, j_media_player_bridge);
+}
+
+void MediaPlayerBridge::SetMediaPlayerListener() {
+ jobject j_context = base::android::GetApplicationContext();
+ DCHECK(j_context);
+
+ listener_.CreateMediaPlayerListener(j_context, j_media_player_bridge_.obj());
+}
+
+void MediaPlayerBridge::SetDuration(base::TimeDelta duration) {
+ duration_ = duration;
+}
+
+void MediaPlayerBridge::SetVideoSurface(gfx::ScopedJavaSurface surface) {
+ if (j_media_player_bridge_.is_null()) {
+ if (surface.IsEmpty())
+ return;
+ Prepare();
+ }
+
+ JNIEnv* env = base::android::AttachCurrentThread();
+ CHECK(env);
+
+ Java_MediaPlayerBridge_setSurface(
+ env, j_media_player_bridge_.obj(), surface.j_surface().obj());
+}
+
+void MediaPlayerBridge::Prepare() {
+ if (j_media_player_bridge_.is_null())
+ CreateJavaMediaPlayerBridge();
+ if (url_.SchemeIsFileSystem()) {
+ manager()->GetMediaResourceGetter()->GetPlatformPathFromFileSystemURL(
+ url_, base::Bind(&MediaPlayerBridge::SetDataSource,
+ weak_this_.GetWeakPtr()));
+ } else {
+ SetDataSource(url_.spec());
+ }
+}
+
+void MediaPlayerBridge::SetDataSource(const std::string& url) {
+ if (j_media_player_bridge_.is_null())
+ return;
+
+ JNIEnv* env = base::android::AttachCurrentThread();
+ CHECK(env);
+
+ // Create a Java String for the URL.
+ ScopedJavaLocalRef<jstring> j_url_string = ConvertUTF8ToJavaString(env, url);
+ ScopedJavaLocalRef<jstring> j_cookies = ConvertUTF8ToJavaString(
+ env, cookies_);
+
+ jobject j_context = base::android::GetApplicationContext();
+ DCHECK(j_context);
+
+ if (Java_MediaPlayerBridge_setDataSource(
+ env, j_media_player_bridge_.obj(), j_context, j_url_string.obj(),
+ j_cookies.obj(), hide_url_log_)) {
+ RequestMediaResourcesFromManager();
+ Java_MediaPlayerBridge_prepareAsync(
+ env, j_media_player_bridge_.obj());
+ } else {
+ OnMediaError(MEDIA_ERROR_FORMAT);
+ }
+}
+
+void MediaPlayerBridge::OnCookiesRetrieved(const std::string& cookies) {
+ cookies_ = cookies;
+ ExtractMediaMetadata(url_.spec());
+}
+
+void MediaPlayerBridge::ExtractMediaMetadata(const std::string& url) {
+ manager()->GetMediaResourceGetter()->ExtractMediaMetadata(
+ url, cookies_, base::Bind(&MediaPlayerBridge::OnMediaMetadataExtracted,
+ weak_this_.GetWeakPtr()));
+}
+
+void MediaPlayerBridge::OnMediaMetadataExtracted(
+ base::TimeDelta duration, int width, int height, bool success) {
+ if (success) {
+ duration_ = duration;
+ width_ = width;
+ height_ = height;
+ }
+ OnMediaMetadataChanged(duration_, width_, height_, success);
+}
+
+void MediaPlayerBridge::Start() {
+ if (j_media_player_bridge_.is_null()) {
+ pending_play_ = true;
+ Prepare();
+ } else {
+ if (prepared_)
+ StartInternal();
+ else
+ pending_play_ = true;
+ }
+}
+
+void MediaPlayerBridge::Pause() {
+ if (j_media_player_bridge_.is_null()) {
+ pending_play_ = false;
+ } else {
+ if (prepared_ && IsPlaying())
+ PauseInternal();
+ else
+ pending_play_ = false;
+ }
+}
+
+bool MediaPlayerBridge::IsPlaying() {
+ if (!prepared_)
+ return pending_play_;
+
+ JNIEnv* env = base::android::AttachCurrentThread();
+ CHECK(env);
+ jboolean result = Java_MediaPlayerBridge_isPlaying(
+ env, j_media_player_bridge_.obj());
+ return result;
+}
+
+int MediaPlayerBridge::GetVideoWidth() {
+ if (!prepared_)
+ return width_;
+ JNIEnv* env = base::android::AttachCurrentThread();
+ return Java_MediaPlayerBridge_getVideoWidth(
+ env, j_media_player_bridge_.obj());
+}
+
+int MediaPlayerBridge::GetVideoHeight() {
+ if (!prepared_)
+ return height_;
+ JNIEnv* env = base::android::AttachCurrentThread();
+ return Java_MediaPlayerBridge_getVideoHeight(
+ env, j_media_player_bridge_.obj());
+}
+
+void MediaPlayerBridge::SeekTo(base::TimeDelta time) {
+ // Record the time to seek when OnMediaPrepared() is called.
+ pending_seek_ = time;
+
+ if (j_media_player_bridge_.is_null())
+ Prepare();
+ else if (prepared_)
+ SeekInternal(time);
+}
+
+base::TimeDelta MediaPlayerBridge::GetCurrentTime() {
+ if (!prepared_)
+ return pending_seek_;
+ JNIEnv* env = base::android::AttachCurrentThread();
+ return base::TimeDelta::FromMilliseconds(
+ Java_MediaPlayerBridge_getCurrentPosition(
+ env, j_media_player_bridge_.obj()));
+}
+
+base::TimeDelta MediaPlayerBridge::GetDuration() {
+ if (!prepared_)
+ return duration_;
+ JNIEnv* env = base::android::AttachCurrentThread();
+ return base::TimeDelta::FromMilliseconds(
+ Java_MediaPlayerBridge_getDuration(
+ env, j_media_player_bridge_.obj()));
+}
+
+void MediaPlayerBridge::Release() {
+ if (j_media_player_bridge_.is_null())
+ return;
+
+ time_update_timer_.Stop();
+ if (prepared_)
+ pending_seek_ = GetCurrentTime();
+ prepared_ = false;
+ pending_play_ = false;
+ SetVideoSurface(gfx::ScopedJavaSurface());
+
+ JNIEnv* env = base::android::AttachCurrentThread();
+ Java_MediaPlayerBridge_release(env, j_media_player_bridge_.obj());
+ j_media_player_bridge_.Reset();
+ ReleaseMediaResourcesFromManager();
+ listener_.ReleaseMediaPlayerListenerResources();
+}
+
+void MediaPlayerBridge::SetVolume(double volume) {
+ if (j_media_player_bridge_.is_null())
+ return;
+
+ JNIEnv* env = base::android::AttachCurrentThread();
+ CHECK(env);
+ Java_MediaPlayerBridge_setVolume(
+ env, j_media_player_bridge_.obj(), volume);
+}
+
+void MediaPlayerBridge::OnVideoSizeChanged(int width, int height) {
+ width_ = width;
+ height_ = height;
+ MediaPlayerAndroid::OnVideoSizeChanged(width, height);
+}
+
+void MediaPlayerBridge::OnPlaybackComplete() {
+ time_update_timer_.Stop();
+ MediaPlayerAndroid::OnPlaybackComplete();
+}
+
+void MediaPlayerBridge::OnMediaInterrupted() {
+ time_update_timer_.Stop();
+ MediaPlayerAndroid::OnMediaInterrupted();
+}
+
+void MediaPlayerBridge::OnMediaPrepared() {
+ if (j_media_player_bridge_.is_null())
+ return;
+
+ prepared_ = true;
+
+ base::TimeDelta dur = duration_;
+ duration_ = GetDuration();
+
+ if (duration_ != dur && 0 != dur.InMilliseconds()) {
+ // Scale the |pending_seek_| according to the new duration.
+ pending_seek_ = base::TimeDelta::FromSeconds(
+ pending_seek_.InSecondsF() * duration_.InSecondsF() / dur.InSecondsF());
+ }
+
+ // If media player was recovered from a saved state, consume all the pending
+ // events.
+ PendingSeekInternal(pending_seek_);
+
+ if (pending_play_) {
+ StartInternal();
+ pending_play_ = false;
+ }
+
+ GetAllowedOperations();
+ OnMediaMetadataChanged(duration_, width_, height_, true);
+}
+
+void MediaPlayerBridge::GetAllowedOperations() {
+ JNIEnv* env = base::android::AttachCurrentThread();
+ CHECK(env);
+
+ ScopedJavaLocalRef<jobject> allowedOperations =
+ Java_MediaPlayerBridge_getAllowedOperations(
+ env, j_media_player_bridge_.obj());
+ can_pause_ = Java_AllowedOperations_canPause(env, allowedOperations.obj());
+ can_seek_forward_ = Java_AllowedOperations_canSeekForward(
+ env, allowedOperations.obj());
+ can_seek_backward_ = Java_AllowedOperations_canSeekBackward(
+ env, allowedOperations.obj());
+}
+
+void MediaPlayerBridge::StartInternal() {
+ JNIEnv* env = base::android::AttachCurrentThread();
+ Java_MediaPlayerBridge_start(env, j_media_player_bridge_.obj());
+ if (!time_update_timer_.IsRunning()) {
+ time_update_timer_.Start(
+ FROM_HERE,
+ base::TimeDelta::FromMilliseconds(kTimeUpdateInterval),
+ this, &MediaPlayerBridge::OnTimeUpdated);
+ }
+}
+
+void MediaPlayerBridge::PauseInternal() {
+ JNIEnv* env = base::android::AttachCurrentThread();
+ Java_MediaPlayerBridge_pause(env, j_media_player_bridge_.obj());
+ time_update_timer_.Stop();
+}
+
+void MediaPlayerBridge::PendingSeekInternal(base::TimeDelta time) {
+ SeekInternal(time);
+}
+
+void MediaPlayerBridge::SeekInternal(base::TimeDelta time) {
+ if (time > duration_)
+ time = duration_;
+
+ // Seeking to an invalid position may cause media player to stuck in an
+ // error state.
+ if (time < base::TimeDelta()) {
+ DCHECK_EQ(-1.0, time.InMillisecondsF());
+ return;
+ }
+
+ JNIEnv* env = base::android::AttachCurrentThread();
+ CHECK(env);
+
+ int time_msec = static_cast<int>(time.InMilliseconds());
+ Java_MediaPlayerBridge_seekTo(
+ env, j_media_player_bridge_.obj(), time_msec);
+}
+
+bool MediaPlayerBridge::RegisterMediaPlayerBridge(JNIEnv* env) {
+ bool ret = RegisterNativesImpl(env);
+ DCHECK(g_MediaPlayerBridge_clazz);
+ return ret;
+}
+
+bool MediaPlayerBridge::CanPause() {
+ return can_pause_;
+}
+
+bool MediaPlayerBridge::CanSeekForward() {
+ return can_seek_forward_;
+}
+
+bool MediaPlayerBridge::CanSeekBackward() {
+ return can_seek_backward_;
+}
+
+bool MediaPlayerBridge::IsPlayerReady() {
+ return prepared_;
+}
+
+GURL MediaPlayerBridge::GetUrl() {
+ return url_;
+}
+
+GURL MediaPlayerBridge::GetFirstPartyForCookies() {
+ return first_party_for_cookies_;
+}
+
+} // namespace media
diff --git a/chromium/media/base/android/media_player_bridge.h b/chromium/media/base/android/media_player_bridge.h
new file mode 100644
index 00000000000..85a29604058
--- /dev/null
+++ b/chromium/media/base/android/media_player_bridge.h
@@ -0,0 +1,165 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_ANDROID_MEDIA_PLAYER_BRIDGE_H_
+#define MEDIA_BASE_ANDROID_MEDIA_PLAYER_BRIDGE_H_
+
+#include <jni.h>
+#include <map>
+#include <string>
+
+#include "base/android/scoped_java_ref.h"
+#include "base/callback.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
+#include "base/time/time.h"
+#include "base/timer/timer.h"
+#include "media/base/android/media_player_android.h"
+#include "media/base/android/media_player_listener.h"
+#include "url/gurl.h"
+
+namespace media {
+
+class MediaPlayerManager;
+
+// This class serves as a bridge between the native code and Android MediaPlayer
+// Java class. For more information on Android MediaPlayer, check
+// http://developer.android.com/reference/android/media/MediaPlayer.html
+// The actual Android MediaPlayer instance is created lazily when Start(),
+// Pause(), SeekTo() gets called. As a result, media information may not
+// be available until one of those operations is performed. After that, we
+// will cache those information in case the mediaplayer gets released.
+// The class uses the corresponding MediaPlayerBridge Java class to talk to
+// the Android MediaPlayer instance.
+class MEDIA_EXPORT MediaPlayerBridge : public MediaPlayerAndroid {
+ public:
+ static bool RegisterMediaPlayerBridge(JNIEnv* env);
+
+ // Construct a MediaPlayerBridge object. This object needs to call |manager|'s
+ // RequestMediaResources() before decoding the media stream. This allows
+ // |manager| to track unused resources and free them when needed. On the other
+ // hand, it needs to call ReleaseMediaResources() when it is done with
+ // decoding. MediaPlayerBridge also forwards Android MediaPlayer callbacks to
+ // the |manager| when needed.
+ MediaPlayerBridge(int player_id,
+ const GURL& url,
+ const GURL& first_party_for_cookies,
+ bool hide_url_log,
+ MediaPlayerManager* manager);
+ virtual ~MediaPlayerBridge();
+
+ // Initialize this object and extract the metadata from the media.
+ virtual void Initialize();
+
+ // MediaPlayerAndroid implementation.
+ virtual void SetVideoSurface(gfx::ScopedJavaSurface surface) OVERRIDE;
+ virtual void Start() OVERRIDE;
+ virtual void Pause() OVERRIDE;
+ virtual void SeekTo(base::TimeDelta time) OVERRIDE;
+ virtual void Release() OVERRIDE;
+ virtual void SetVolume(double volume) OVERRIDE;
+ virtual int GetVideoWidth() OVERRIDE;
+ virtual int GetVideoHeight() OVERRIDE;
+ virtual base::TimeDelta GetCurrentTime() OVERRIDE;
+ virtual base::TimeDelta GetDuration() OVERRIDE;
+ virtual bool IsPlaying() OVERRIDE;
+ virtual bool CanPause() OVERRIDE;
+ virtual bool CanSeekForward() OVERRIDE;
+ virtual bool CanSeekBackward() OVERRIDE;
+ virtual bool IsPlayerReady() OVERRIDE;
+ virtual GURL GetUrl() OVERRIDE;
+ virtual GURL GetFirstPartyForCookies() OVERRIDE;
+
+ protected:
+ void SetJavaMediaPlayerBridge(jobject j_media_player_bridge);
+ void SetMediaPlayerListener();
+ void SetDuration(base::TimeDelta time);
+
+ // MediaPlayerAndroid implementation.
+ virtual void OnVideoSizeChanged(int width, int height) OVERRIDE;
+ virtual void OnPlaybackComplete() OVERRIDE;
+ virtual void OnMediaInterrupted() OVERRIDE;
+
+ virtual void PendingSeekInternal(base::TimeDelta time);
+
+ // Prepare the player for playback, asynchronously. When succeeds,
+ // OnMediaPrepared() will be called. Otherwise, OnMediaError() will
+ // be called with an error type.
+ virtual void Prepare();
+ void OnMediaPrepared();
+
+ // Create the corresponding Java class instance.
+ virtual void CreateJavaMediaPlayerBridge();
+
+ private:
+ // Set the data source for the media player.
+ void SetDataSource(const std::string& url);
+
+ // Functions that implements media player control.
+ void StartInternal();
+ void PauseInternal();
+ void SeekInternal(base::TimeDelta time);
+
+ // Get allowed operations from the player.
+ void GetAllowedOperations();
+
+ // Callback function passed to |resource_getter_|. Called when the cookies
+ // are retrieved.
+ void OnCookiesRetrieved(const std::string& cookies);
+
+ // Extract the media metadata from a url, asynchronously.
+ // OnMediaMetadataExtracted() will be called when this call finishes.
+ void ExtractMediaMetadata(const std::string& url);
+ void OnMediaMetadataExtracted(base::TimeDelta duration, int width, int height,
+ bool success);
+
+ // Whether the player is prepared for playback.
+ bool prepared_;
+
+ // Pending play event while player is preparing.
+ bool pending_play_;
+
+ // Pending seek time while player is preparing.
+ base::TimeDelta pending_seek_;
+
+ // Url for playback.
+ GURL url_;
+
+ // First party url for cookies.
+ GURL first_party_for_cookies_;
+
+ // Hide url log from media player.
+ bool hide_url_log_;
+
+ // Stats about the media.
+ base::TimeDelta duration_;
+ int width_;
+ int height_;
+
+ // Meta data about actions can be taken.
+ bool can_pause_;
+ bool can_seek_forward_;
+ bool can_seek_backward_;
+
+ // Cookies for |url_|.
+ std::string cookies_;
+
+ // Java MediaPlayerBridge instance.
+ base::android::ScopedJavaGlobalRef<jobject> j_media_player_bridge_;
+
+ base::RepeatingTimer<MediaPlayerBridge> time_update_timer_;
+
+ // Weak pointer passed to |listener_| for callbacks.
+ base::WeakPtrFactory<MediaPlayerBridge> weak_this_;
+
+ // Listener object that listens to all the media player events.
+ MediaPlayerListener listener_;
+
+ friend class MediaPlayerListener;
+ DISALLOW_COPY_AND_ASSIGN(MediaPlayerBridge);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_ANDROID_MEDIA_PLAYER_BRIDGE_H_
diff --git a/chromium/media/base/android/media_player_listener.cc b/chromium/media/base/android/media_player_listener.cc
new file mode 100644
index 00000000000..c26984034ed
--- /dev/null
+++ b/chromium/media/base/android/media_player_listener.cc
@@ -0,0 +1,103 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/android/media_player_listener.h"
+
+#include "base/android/jni_android.h"
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/message_loop/message_loop_proxy.h"
+#include "media/base/android/media_player_bridge.h"
+
+// Auto generated jni class from MediaPlayerListener.java.
+// Check base/android/jni_generator/golden_sample_for_tests_jni.h for example.
+#include "jni/MediaPlayerListener_jni.h"
+
+using base::android::AttachCurrentThread;
+using base::android::CheckException;
+using base::android::ScopedJavaLocalRef;
+
+namespace media {
+
+MediaPlayerListener::MediaPlayerListener(
+ const scoped_refptr<base::MessageLoopProxy>& message_loop,
+ base::WeakPtr<MediaPlayerBridge> media_player)
+ : message_loop_(message_loop),
+ media_player_(media_player) {
+ DCHECK(message_loop_.get());
+ DCHECK(media_player_);
+}
+
+MediaPlayerListener::~MediaPlayerListener() {}
+
+void MediaPlayerListener::CreateMediaPlayerListener(
+ jobject context, jobject media_player_bridge) {
+ JNIEnv* env = AttachCurrentThread();
+ CHECK(env);
+ j_media_player_listener_.Reset(
+ Java_MediaPlayerListener_create(
+ env, reinterpret_cast<intptr_t>(this), context, media_player_bridge));
+}
+
+
+void MediaPlayerListener::ReleaseMediaPlayerListenerResources() {
+ JNIEnv* env = AttachCurrentThread();
+ CHECK(env);
+ if (!j_media_player_listener_.is_null()) {
+ Java_MediaPlayerListener_releaseResources(
+ env, j_media_player_listener_.obj());
+ }
+ j_media_player_listener_.Reset();
+}
+
+void MediaPlayerListener::OnMediaError(
+ JNIEnv* /* env */, jobject /* obj */, jint error_type) {
+ message_loop_->PostTask(FROM_HERE, base::Bind(
+ &MediaPlayerBridge::OnMediaError, media_player_, error_type));
+}
+
+void MediaPlayerListener::OnVideoSizeChanged(
+ JNIEnv* /* env */, jobject /* obj */, jint width, jint height) {
+ message_loop_->PostTask(FROM_HERE, base::Bind(
+ &MediaPlayerBridge::OnVideoSizeChanged, media_player_,
+ width, height));
+}
+
+void MediaPlayerListener::OnBufferingUpdate(
+ JNIEnv* /* env */, jobject /* obj */, jint percent) {
+ message_loop_->PostTask(FROM_HERE, base::Bind(
+ &MediaPlayerBridge::OnBufferingUpdate, media_player_, percent));
+}
+
+void MediaPlayerListener::OnPlaybackComplete(
+ JNIEnv* /* env */, jobject /* obj */) {
+ message_loop_->PostTask(FROM_HERE, base::Bind(
+ &MediaPlayerBridge::OnPlaybackComplete, media_player_));
+}
+
+void MediaPlayerListener::OnSeekComplete(
+ JNIEnv* /* env */, jobject /* obj */) {
+ message_loop_->PostTask(FROM_HERE, base::Bind(
+ &MediaPlayerBridge::OnSeekComplete, media_player_));
+}
+
+void MediaPlayerListener::OnMediaPrepared(
+ JNIEnv* /* env */, jobject /* obj */) {
+ message_loop_->PostTask(FROM_HERE, base::Bind(
+ &MediaPlayerBridge::OnMediaPrepared, media_player_));
+}
+
+void MediaPlayerListener::OnMediaInterrupted(
+ JNIEnv* /* env */, jobject /* obj */) {
+ message_loop_->PostTask(FROM_HERE, base::Bind(
+ &MediaPlayerBridge::OnMediaInterrupted, media_player_));
+}
+
+bool MediaPlayerListener::RegisterMediaPlayerListener(JNIEnv* env) {
+ bool ret = RegisterNativesImpl(env);
+ DCHECK(g_MediaPlayerListener_clazz);
+ return ret;
+}
+
+} // namespace media
diff --git a/chromium/media/base/android/media_player_listener.h b/chromium/media/base/android/media_player_listener.h
new file mode 100644
index 00000000000..698493b0f33
--- /dev/null
+++ b/chromium/media/base/android/media_player_listener.h
@@ -0,0 +1,66 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_ANDROID_MEDIA_PLAYER_LISTENER_H_
+#define MEDIA_BASE_ANDROID_MEDIA_PLAYER_LISTENER_H_
+
+#include <jni.h>
+
+#include "base/android/scoped_java_ref.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/weak_ptr.h"
+
+namespace base {
+class MessageLoopProxy;
+}
+
+namespace media {
+
+class MediaPlayerBridge;
+
+// Acts as a thread proxy between java MediaPlayerListener object and
+// MediaPlayerBridge so that callbacks are posted onto the UI thread.
+class MediaPlayerListener {
+ public:
+ // Construct a native MediaPlayerListener object. Callbacks from the java
+ // side object will be forwarded to |media_player| by posting a task on the
+ // |message_loop|.
+ MediaPlayerListener(
+ const scoped_refptr<base::MessageLoopProxy>& message_loop,
+ base::WeakPtr<MediaPlayerBridge> media_player);
+ virtual ~MediaPlayerListener();
+
+ // Called by the Java MediaPlayerListener and mirrored to corresponding
+ // callbacks.
+ void OnMediaError(JNIEnv* /* env */, jobject /* obj */, jint error_type);
+ void OnVideoSizeChanged(JNIEnv* /* env */, jobject /* obj */,
+ jint width, jint height);
+ void OnBufferingUpdate(JNIEnv* /* env */, jobject /* obj */, jint percent);
+ void OnPlaybackComplete(JNIEnv* /* env */, jobject /* obj */);
+ void OnSeekComplete(JNIEnv* /* env */, jobject /* obj */);
+ void OnMediaPrepared(JNIEnv* /* env */, jobject /* obj */);
+ void OnMediaInterrupted(JNIEnv* /* env */, jobject /* obj */);
+
+ // Create a Java MediaPlayerListener object.
+ void CreateMediaPlayerListener(jobject context, jobject media_player_bridge);
+ void ReleaseMediaPlayerListenerResources();
+
+ // Register MediaPlayerListener in the system library loader.
+ static bool RegisterMediaPlayerListener(JNIEnv* env);
+
+ private:
+ // The message loop where |media_player_| lives.
+ scoped_refptr<base::MessageLoopProxy> message_loop_;
+
+ // The MediaPlayerBridge object all the callbacks should be send to.
+ base::WeakPtr<MediaPlayerBridge> media_player_;
+
+ base::android::ScopedJavaGlobalRef<jobject> j_media_player_listener_;
+
+ DISALLOW_COPY_AND_ASSIGN(MediaPlayerListener);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_ANDROID_MEDIA_PLAYER_LISTENER_H_
diff --git a/chromium/media/base/android/media_player_manager.h b/chromium/media/base/android/media_player_manager.h
new file mode 100644
index 00000000000..a0f5017c13f
--- /dev/null
+++ b/chromium/media/base/android/media_player_manager.h
@@ -0,0 +1,143 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_ANDROID_MEDIA_PLAYER_MANAGER_H_
+#define MEDIA_BASE_ANDROID_MEDIA_PLAYER_MANAGER_H_
+
+#include <string>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/time/time.h"
+#include "media/base/android/demuxer_stream_player_params.h"
+#include "media/base/media_export.h"
+#include "media/base/media_keys.h"
+
+namespace content {
+class RenderViewHost;
+}
+
+namespace media {
+
+class MediaDrmBridge;
+class MediaPlayerAndroid;
+class MediaResourceGetter;
+
+// This class is responsible for managing active MediaPlayerAndroid objects.
+// Objects implementing this interface a created via
+// MediaPlayerManager::Create(), allowing embedders to provide their
+// implementation.
+class MEDIA_EXPORT MediaPlayerManager {
+ public:
+ // The type of the factory function that returns a new instance of the
+ // MediaPlayerManager implementation.
+ typedef MediaPlayerManager* (*FactoryFunction)(content::RenderViewHost*);
+
+ // Allows to override the default factory function in order to provide
+ // a custom implementation to the RenderViewHost instance.
+ // Must be called from the main thread.
+ static void RegisterFactoryFunction(FactoryFunction factory_function);
+
+ // Returns a new instance of MediaPlayerManager interface implementation.
+ // The returned object is owned by the caller. Must be called on the main
+ // thread.
+ static MediaPlayerManager* Create(content::RenderViewHost* render_view_host);
+
+ virtual ~MediaPlayerManager() {}
+
+ // Called by a MediaPlayerAndroid object when it is going to decode
+ // media streams. This helps the manager object maintain an array
+ // of active MediaPlayerAndroid objects and release the resources
+ // when needed.
+ virtual void RequestMediaResources(int player_id) = 0;
+
+ // Called when a MediaPlayerAndroid object releases all its decoding
+ // resources.
+ virtual void ReleaseMediaResources(int player_id) = 0;
+
+ // Return a pointer to the MediaResourceGetter object.
+ virtual MediaResourceGetter* GetMediaResourceGetter() = 0;
+
+ // Called when time update messages need to be sent. Args: player ID,
+ // current time.
+ virtual void OnTimeUpdate(int player_id, base::TimeDelta current_time) = 0;
+
+ // Called when media metadata changed. Args: player ID, duration of the
+ // media, width, height, whether the metadata is successfully extracted.
+ virtual void OnMediaMetadataChanged(
+ int player_id,
+ base::TimeDelta duration,
+ int width,
+ int height,
+ bool success) = 0;
+
+ // Called when playback completed. Args: player ID.
+ virtual void OnPlaybackComplete(int player_id) = 0;
+
+ // Called when media download was interrupted. Args: player ID.
+ virtual void OnMediaInterrupted(int player_id) = 0;
+
+ // Called when buffering has changed. Args: player ID, percentage
+ // of the media.
+ virtual void OnBufferingUpdate(int player_id, int percentage) = 0;
+
+ // Called when seek completed. Args: player ID, current time.
+ virtual void OnSeekComplete(int player_id, base::TimeDelta current_time) = 0;
+
+ // Called when error happens. Args: player ID, error type.
+ virtual void OnError(int player_id, int error) = 0;
+
+ // Called when video size has changed. Args: player ID, width, height.
+ virtual void OnVideoSizeChanged(int player_id, int width, int height) = 0;
+
+ // Returns the player that's in the fullscreen mode currently.
+ virtual MediaPlayerAndroid* GetFullscreenPlayer() = 0;
+
+ // Returns the player with the specified id.
+ virtual MediaPlayerAndroid* GetPlayer(int player_id) = 0;
+
+ // Release all the players managed by this object.
+ virtual void DestroyAllMediaPlayers() = 0;
+
+ // Callback when DemuxerStreamPlayer wants to read data from the demuxer.
+ virtual void OnReadFromDemuxer(int player_id,
+ media::DemuxerStream::Type type) = 0;
+
+ // Called when player wants the media element to initiate a seek.
+ virtual void OnMediaSeekRequest(int player_id, base::TimeDelta time_to_seek,
+ unsigned seek_request_id) = 0;
+
+ // Called when player wants to read the config data from the demuxer.
+ virtual void OnMediaConfigRequest(int player_id) = 0;
+
+ // Get the MediaDrmBridge object for the given media key Id.
+ virtual media::MediaDrmBridge* GetDrmBridge(int media_keys_id) = 0;
+
+ // Called by the player to get a hardware protected surface.
+ virtual void OnProtectedSurfaceRequested(int player_id) = 0;
+
+ // TODO(xhwang): The following three methods needs to be decoupled from
+ // MediaPlayerManager to support the W3C Working Draft version of the EME
+ // spec.
+
+ // Called when MediaDrmBridge wants to send a KeyAdded.
+ virtual void OnKeyAdded(int media_keys_id,
+ const std::string& session_id) = 0;
+
+ // Called when MediaDrmBridge wants to send a KeyError.
+ virtual void OnKeyError(int media_keys_id,
+ const std::string& session_id,
+ media::MediaKeys::KeyError error_code,
+ int system_code) = 0;
+
+ // Called when MediaDrmBridge wants to send a KeyMessage.
+ virtual void OnKeyMessage(int media_keys_id,
+ const std::string& session_id,
+ const std::vector<uint8>& message,
+ const std::string& destination_url) = 0;
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_ANDROID_MEDIA_PLAYER_MANAGER_H_
diff --git a/chromium/media/base/android/media_resource_getter.cc b/chromium/media/base/android/media_resource_getter.cc
new file mode 100644
index 00000000000..5fdff06b946
--- /dev/null
+++ b/chromium/media/base/android/media_resource_getter.cc
@@ -0,0 +1,11 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/android/media_resource_getter.h"
+
+namespace media {
+
+MediaResourceGetter::~MediaResourceGetter() {}
+
+} // namespace media
diff --git a/chromium/media/base/android/media_resource_getter.h b/chromium/media/base/android/media_resource_getter.h
new file mode 100644
index 00000000000..ea4eccdf5c5
--- /dev/null
+++ b/chromium/media/base/android/media_resource_getter.h
@@ -0,0 +1,48 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_ANDROID_MEDIA_RESOURCE_GETTER_H_
+#define MEDIA_BASE_ANDROID_MEDIA_RESOURCE_GETTER_H_
+
+#include <string>
+
+#include "base/callback.h"
+#include "base/files/file_path.h"
+#include "base/time/time.h"
+#include "media/base/media_export.h"
+#include "url/gurl.h"
+
+namespace media {
+
+// Class for asynchronously retrieving resources for a media URL. All callbacks
+// are executed on the caller's thread.
+class MEDIA_EXPORT MediaResourceGetter {
+ public:
+ typedef base::Callback<void(const std::string&)> GetCookieCB;
+ typedef base::Callback<void(const std::string&)> GetPlatformPathCB;
+ typedef base::Callback<void(base::TimeDelta, int, int, bool)>
+ ExtractMediaMetadataCB;
+ virtual ~MediaResourceGetter();
+
+ // Method for getting the cookies for a given URL.
+ virtual void GetCookies(const GURL& url,
+ const GURL& first_party_for_cookies,
+ const GetCookieCB& callback) = 0;
+
+ // Method for getting the platform path from a file system URL.
+ virtual void GetPlatformPathFromFileSystemURL(
+ const GURL& url,
+ const GetPlatformPathCB& callback) = 0;
+
+ // Extract the metadata from a media URL. Once completed, the provided
+ // callback function will be run.
+ virtual void ExtractMediaMetadata(
+ const std::string& url,
+ const std::string& cookies,
+ const ExtractMediaMetadataCB& callback) = 0;
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_ANDROID_MEDIA_RESOURCE_GETTER_H_
diff --git a/chromium/media/base/android/media_source_player.cc b/chromium/media/base/android/media_source_player.cc
new file mode 100644
index 00000000000..19dc446f2e8
--- /dev/null
+++ b/chromium/media/base/android/media_source_player.cc
@@ -0,0 +1,953 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/android/media_source_player.h"
+
+#include "base/android/jni_android.h"
+#include "base/android/jni_string.h"
+#include "base/basictypes.h"
+#include "base/bind.h"
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/message_loop/message_loop.h"
+#include "base/threading/thread.h"
+#include "media/base/android/media_codec_bridge.h"
+#include "media/base/android/media_drm_bridge.h"
+#include "media/base/android/media_player_manager.h"
+#include "media/base/audio_timestamp_helper.h"
+
+namespace {
+
+// Timeout value for media codec operations. Because the first
+// DequeInputBuffer() can take about 150 milliseconds, use 250 milliseconds
+// here. See b/9357571.
+const int kMediaCodecTimeoutInMilliseconds = 250;
+
+// Use 16bit PCM for audio output. Keep this value in sync with the output
+// format we passed to AudioTrack in MediaCodecBridge.
+const int kBytesPerAudioOutputSample = 2;
+
+class DecoderThread : public base::Thread {
+ public:
+ virtual ~DecoderThread() {}
+ protected:
+ DecoderThread(const char* name) : base::Thread(name) { Start(); }
+};
+
+class AudioDecoderThread : public DecoderThread {
+ public:
+ AudioDecoderThread() : DecoderThread("MediaSource_AudioDecoderThread") {}
+};
+
+class VideoDecoderThread : public DecoderThread {
+ public:
+ VideoDecoderThread() : DecoderThread("MediaSource_VideoDecoderThread") {}
+};
+
+// TODO(qinmin): Check if it is tolerable to use worker pool to handle all the
+// decoding tasks so that we don't need the global threads here.
+// http://crbug.com/245750
+base::LazyInstance<AudioDecoderThread>::Leaky
+ g_audio_decoder_thread = LAZY_INSTANCE_INITIALIZER;
+
+base::LazyInstance<VideoDecoderThread>::Leaky
+ g_video_decoder_thread = LAZY_INSTANCE_INITIALIZER;
+
+}
+
+namespace media {
+
+MediaDecoderJob::MediaDecoderJob(
+ const scoped_refptr<base::MessageLoopProxy>& decoder_loop,
+ MediaCodecBridge* media_codec_bridge,
+ bool is_audio)
+ : ui_loop_(base::MessageLoopProxy::current()),
+ decoder_loop_(decoder_loop),
+ media_codec_bridge_(media_codec_bridge),
+ needs_flush_(false),
+ is_audio_(is_audio),
+ input_eos_encountered_(false),
+ weak_this_(this),
+ is_decoding_(false) {
+}
+
+MediaDecoderJob::~MediaDecoderJob() {}
+
+// Class for managing audio decoding jobs.
+class AudioDecoderJob : public MediaDecoderJob {
+ public:
+ virtual ~AudioDecoderJob() {}
+
+ static AudioDecoderJob* Create(
+ const AudioCodec audio_codec, int sample_rate, int channel_count,
+ const uint8* extra_data, size_t extra_data_size, jobject media_crypto);
+
+ void SetVolume(double volume);
+
+ private:
+ AudioDecoderJob(MediaCodecBridge* media_codec_bridge);
+};
+
+// Class for managing video decoding jobs.
+class VideoDecoderJob : public MediaDecoderJob {
+ public:
+ virtual ~VideoDecoderJob() {}
+
+ static VideoDecoderJob* Create(
+ const VideoCodec video_codec, const gfx::Size& size, jobject surface,
+ jobject media_crypto);
+
+ private:
+ VideoDecoderJob(MediaCodecBridge* media_codec_bridge);
+};
+
+void MediaDecoderJob::Decode(
+ const AccessUnit& unit,
+ const base::TimeTicks& start_time_ticks,
+ const base::TimeDelta& start_presentation_timestamp,
+ const MediaDecoderJob::DecoderCallback& callback) {
+ DCHECK(!is_decoding_);
+ DCHECK(ui_loop_->BelongsToCurrentThread());
+ is_decoding_ = true;
+ decoder_loop_->PostTask(FROM_HERE, base::Bind(
+ &MediaDecoderJob::DecodeInternal, base::Unretained(this), unit,
+ start_time_ticks, start_presentation_timestamp, needs_flush_,
+ callback));
+ needs_flush_ = false;
+}
+
+MediaDecoderJob::DecodeStatus MediaDecoderJob::QueueInputBuffer(
+ const AccessUnit& unit) {
+ base::TimeDelta timeout = base::TimeDelta::FromMilliseconds(
+ kMediaCodecTimeoutInMilliseconds);
+ int input_buf_index = media_codec_bridge_->DequeueInputBuffer(timeout);
+ if (input_buf_index == MediaCodecBridge::INFO_MEDIA_CODEC_ERROR)
+ return DECODE_FAILED;
+ if (input_buf_index == MediaCodecBridge::INFO_TRY_AGAIN_LATER)
+ return DECODE_TRY_ENQUEUE_INPUT_AGAIN_LATER;
+
+ // TODO(qinmin): skip frames if video is falling far behind.
+ DCHECK(input_buf_index >= 0);
+ if (unit.end_of_stream || unit.data.empty()) {
+ media_codec_bridge_->QueueEOS(input_buf_index);
+ return DECODE_INPUT_END_OF_STREAM;
+ }
+ if (unit.key_id.empty()) {
+ media_codec_bridge_->QueueInputBuffer(
+ input_buf_index, &unit.data[0], unit.data.size(), unit.timestamp);
+ } else {
+ if (unit.iv.empty() || unit.subsamples.empty()) {
+ LOG(ERROR) << "The access unit doesn't have iv or subsamples while it "
+ << "has key IDs!";
+ return DECODE_FAILED;
+ }
+ media_codec_bridge_->QueueSecureInputBuffer(
+ input_buf_index, &unit.data[0], unit.data.size(),
+ reinterpret_cast<const uint8*>(&unit.key_id[0]), unit.key_id.size(),
+ reinterpret_cast<const uint8*>(&unit.iv[0]), unit.iv.size(),
+ &unit.subsamples[0], unit.subsamples.size(), unit.timestamp);
+ }
+
+ return DECODE_SUCCEEDED;
+}
+
+void MediaDecoderJob::DecodeInternal(
+ const AccessUnit& unit,
+ const base::TimeTicks& start_time_ticks,
+ const base::TimeDelta& start_presentation_timestamp,
+ bool needs_flush,
+ const MediaDecoderJob::DecoderCallback& callback) {
+ if (needs_flush) {
+ DVLOG(1) << "DecodeInternal needs flush.";
+ input_eos_encountered_ = false;
+ media_codec_bridge_->Reset();
+ }
+
+ DecodeStatus decode_status = DECODE_INPUT_END_OF_STREAM;
+ if (!input_eos_encountered_) {
+ decode_status = QueueInputBuffer(unit);
+ if (decode_status == DECODE_INPUT_END_OF_STREAM) {
+ input_eos_encountered_ = true;
+ } else if (decode_status != DECODE_SUCCEEDED) {
+ ui_loop_->PostTask(FROM_HERE,
+ base::Bind(callback, decode_status,
+ start_presentation_timestamp, 0));
+ return;
+ }
+ }
+
+ size_t offset = 0;
+ size_t size = 0;
+ base::TimeDelta presentation_timestamp;
+ bool end_of_stream = false;
+
+ base::TimeDelta timeout = base::TimeDelta::FromMilliseconds(
+ kMediaCodecTimeoutInMilliseconds);
+ int outputBufferIndex = media_codec_bridge_->DequeueOutputBuffer(
+ timeout, &offset, &size, &presentation_timestamp, &end_of_stream);
+
+ if (end_of_stream)
+ decode_status = DECODE_OUTPUT_END_OF_STREAM;
+ switch (outputBufferIndex) {
+ case MediaCodecBridge::INFO_OUTPUT_BUFFERS_CHANGED:
+ DCHECK(decode_status != DECODE_INPUT_END_OF_STREAM);
+ media_codec_bridge_->GetOutputBuffers();
+ break;
+ case MediaCodecBridge::INFO_OUTPUT_FORMAT_CHANGED:
+ DCHECK(decode_status != DECODE_INPUT_END_OF_STREAM);
+ // TODO(qinmin): figure out what we should do if format changes.
+ decode_status = DECODE_FORMAT_CHANGED;
+ break;
+ case MediaCodecBridge::INFO_TRY_AGAIN_LATER:
+ decode_status = DECODE_TRY_DEQUEUE_OUTPUT_AGAIN_LATER;
+ break;
+ case MediaCodecBridge::INFO_MEDIA_CODEC_ERROR:
+ decode_status = DECODE_FAILED;
+ break;
+ default:
+ DCHECK_LE(0, outputBufferIndex);
+ base::TimeDelta time_to_render;
+ DCHECK(!start_time_ticks.is_null());
+ if (!is_audio_) {
+ time_to_render = presentation_timestamp - (base::TimeTicks::Now() -
+ start_time_ticks + start_presentation_timestamp);
+ }
+ if (time_to_render >= base::TimeDelta()) {
+ base::MessageLoop::current()->PostDelayedTask(
+ FROM_HERE,
+ base::Bind(&MediaDecoderJob::ReleaseOutputBuffer,
+ weak_this_.GetWeakPtr(), outputBufferIndex, size,
+ presentation_timestamp, callback, decode_status),
+ time_to_render);
+ } else {
+ // TODO(qinmin): The codec is lagging behind, need to recalculate the
+ // |start_presentation_timestamp_| and |start_time_ticks_|.
+ DVLOG(1) << (is_audio_ ? "audio " : "video ")
+ << "codec is lagging behind :" << time_to_render.InMicroseconds();
+ ReleaseOutputBuffer(outputBufferIndex, size, presentation_timestamp,
+ callback, decode_status);
+ }
+ return;
+ }
+ ui_loop_->PostTask(FROM_HERE, base::Bind(
+ callback, decode_status, start_presentation_timestamp, 0));
+}
+
+void MediaDecoderJob::ReleaseOutputBuffer(
+ int outputBufferIndex, size_t size,
+ const base::TimeDelta& presentation_timestamp,
+ const MediaDecoderJob::DecoderCallback& callback, DecodeStatus status) {
+ // TODO(qinmin): Refactor this function. Maybe AudioDecoderJob should provide
+ // its own ReleaseOutputBuffer().
+ if (is_audio_) {
+ static_cast<AudioCodecBridge*>(media_codec_bridge_.get())->PlayOutputBuffer(
+ outputBufferIndex, size);
+ }
+ if (status != DECODE_OUTPUT_END_OF_STREAM || size != 0u)
+ media_codec_bridge_->ReleaseOutputBuffer(outputBufferIndex, !is_audio_);
+ ui_loop_->PostTask(FROM_HERE, base::Bind(
+ callback, status, presentation_timestamp, is_audio_ ? size : 0));
+}
+
+void MediaDecoderJob::OnDecodeCompleted() {
+ DCHECK(ui_loop_->BelongsToCurrentThread());
+ is_decoding_ = false;
+}
+
+void MediaDecoderJob::Flush() {
+ // Do nothing, flush when the next Decode() happens.
+ needs_flush_ = true;
+}
+
+void MediaDecoderJob::Release() {
+ // If |decoding_| is false, there is nothing running on the decoder thread.
+ // So it is safe to delete the MediaDecoderJob on the UI thread. However,
+ // if we post a task to the decoder thread to delete object, then we cannot
+ // immediately pass the surface to a new MediaDecoderJob instance because
+ // the java surface is still owned by the old object. New decoder creation
+ // will be blocked on the UI thread until the previous decoder gets deleted.
+ // This introduces extra latency during config changes, and makes the logic in
+ // MediaSourcePlayer more complicated.
+ //
+ // TODO(qinmin): Figure out the logic to passing the surface to a new
+ // MediaDecoderJob instance after the previous one gets deleted on the decoder
+ // thread.
+ if (is_decoding_ && !decoder_loop_->BelongsToCurrentThread()) {
+ DCHECK(ui_loop_->BelongsToCurrentThread());
+ decoder_loop_->DeleteSoon(FROM_HERE, this);
+ } else {
+ delete this;
+ }
+}
+
+VideoDecoderJob* VideoDecoderJob::Create(
+ const VideoCodec video_codec, const gfx::Size& size, jobject surface,
+ jobject media_crypto) {
+ scoped_ptr<VideoCodecBridge> codec(VideoCodecBridge::Create(video_codec));
+ if (codec && codec->Start(video_codec, size, surface, media_crypto))
+ return new VideoDecoderJob(codec.release());
+ return NULL;
+}
+
+VideoDecoderJob::VideoDecoderJob(MediaCodecBridge* media_codec_bridge)
+ : MediaDecoderJob(g_video_decoder_thread.Pointer()->message_loop_proxy(),
+ media_codec_bridge,
+ false) {}
+
+AudioDecoderJob* AudioDecoderJob::Create(
+ const AudioCodec audio_codec,
+ int sample_rate,
+ int channel_count,
+ const uint8* extra_data,
+ size_t extra_data_size,
+ jobject media_crypto) {
+ scoped_ptr<AudioCodecBridge> codec(AudioCodecBridge::Create(audio_codec));
+ if (codec && codec->Start(audio_codec, sample_rate, channel_count, extra_data,
+ extra_data_size, true, media_crypto)) {
+ return new AudioDecoderJob(codec.release());
+ }
+ return NULL;
+}
+
+AudioDecoderJob::AudioDecoderJob(MediaCodecBridge* media_codec_bridge)
+ : MediaDecoderJob(g_audio_decoder_thread.Pointer()->message_loop_proxy(),
+ media_codec_bridge,
+ true) {}
+
+void AudioDecoderJob::SetVolume(double volume) {
+ static_cast<AudioCodecBridge*>(media_codec_bridge_.get())->SetVolume(volume);
+}
+
+MediaSourcePlayer::MediaSourcePlayer(
+ int player_id,
+ MediaPlayerManager* manager)
+ : MediaPlayerAndroid(player_id, manager),
+ pending_event_(NO_EVENT_PENDING),
+ seek_request_id_(0),
+ width_(0),
+ height_(0),
+ audio_codec_(kUnknownAudioCodec),
+ video_codec_(kUnknownVideoCodec),
+ num_channels_(0),
+ sampling_rate_(0),
+ audio_finished_(true),
+ video_finished_(true),
+ playing_(false),
+ is_audio_encrypted_(false),
+ is_video_encrypted_(false),
+ volume_(-1.0),
+ clock_(&default_tick_clock_),
+ reconfig_audio_decoder_(false),
+ reconfig_video_decoder_(false),
+ audio_access_unit_index_(0),
+ video_access_unit_index_(0),
+ waiting_for_audio_data_(false),
+ waiting_for_video_data_(false),
+ sync_decoder_jobs_(true),
+ weak_this_(this),
+ drm_bridge_(NULL) {
+}
+
+MediaSourcePlayer::~MediaSourcePlayer() {
+ Release();
+}
+
+void MediaSourcePlayer::SetVideoSurface(gfx::ScopedJavaSurface surface) {
+  // Ignore a non-empty, unprotected surface when |is_video_encrypted_| is
+  // true.
+ if (is_video_encrypted_ && !surface.IsEmpty() && !surface.is_protected())
+ return;
+
+ surface_ = surface.Pass();
+ pending_event_ |= SURFACE_CHANGE_EVENT_PENDING;
+ if (pending_event_ & SEEK_EVENT_PENDING) {
+ // Waiting for the seek to finish.
+ return;
+ }
+ // Setting a new surface will require a new MediaCodec to be created.
+ // Request a seek so that the new decoder will decode an I-frame first.
+  // Otherwise, the new MediaCodec might crash. See b/8950387.
+ pending_event_ |= SEEK_EVENT_PENDING;
+ ProcessPendingEvents();
+}
+
+bool MediaSourcePlayer::Seekable() {
+  // If the duration, converted from microseconds to milliseconds, is
+  // >= 2^31, the media is assumed to be unbounded and unseekable. 2^31 is
+  // the bound because the Java player uses a 32-bit integer for time values
+  // at millisecond resolution.
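+  // (2^31 milliseconds is roughly 24.8 days.)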
+ return duration_ <
+ base::TimeDelta::FromMilliseconds(std::numeric_limits<int32>::max());
+}
+
+void MediaSourcePlayer::Start() {
+ playing_ = true;
+
+ if (is_video_encrypted_)
+ manager()->OnProtectedSurfaceRequested(player_id());
+
+ StartInternal();
+}
+
+void MediaSourcePlayer::Pause() {
+ // Since decoder jobs have their own thread, decoding is not fully paused
+ // until all the decoder jobs call MediaDecoderCallback(). It is possible
+ // that Start() is called while the player is waiting for
+ // MediaDecoderCallback(). In that case, decoding will continue when
+ // MediaDecoderCallback() is called.
+ playing_ = false;
+ start_time_ticks_ = base::TimeTicks();
+}
+
+bool MediaSourcePlayer::IsPlaying() {
+ return playing_;
+}
+
+int MediaSourcePlayer::GetVideoWidth() {
+ return width_;
+}
+
+int MediaSourcePlayer::GetVideoHeight() {
+ return height_;
+}
+
+void MediaSourcePlayer::SeekTo(base::TimeDelta timestamp) {
+ clock_.SetTime(timestamp, timestamp);
+ if (audio_timestamp_helper_)
+ audio_timestamp_helper_->SetBaseTimestamp(timestamp);
+ pending_event_ |= SEEK_EVENT_PENDING;
+ ProcessPendingEvents();
+}
+
+base::TimeDelta MediaSourcePlayer::GetCurrentTime() {
+ return clock_.Elapsed();
+}
+
+base::TimeDelta MediaSourcePlayer::GetDuration() {
+ return duration_;
+}
+
+void MediaSourcePlayer::Release() {
+ ClearDecodingData();
+ audio_decoder_job_.reset();
+ video_decoder_job_.reset();
+ reconfig_audio_decoder_ = false;
+ reconfig_video_decoder_ = false;
+ playing_ = false;
+ pending_event_ = NO_EVENT_PENDING;
+ surface_ = gfx::ScopedJavaSurface();
+ ReleaseMediaResourcesFromManager();
+}
+
+void MediaSourcePlayer::SetVolume(double volume) {
+ volume_ = volume;
+ SetVolumeInternal();
+}
+
+bool MediaSourcePlayer::CanPause() {
+ return Seekable();
+}
+
+bool MediaSourcePlayer::CanSeekForward() {
+ return Seekable();
+}
+
+bool MediaSourcePlayer::CanSeekBackward() {
+ return Seekable();
+}
+
+bool MediaSourcePlayer::IsPlayerReady() {
+ return audio_decoder_job_ || video_decoder_job_;
+}
+
+void MediaSourcePlayer::StartInternal() {
+  // If there are pending events, wait for them to finish.
+ if (pending_event_ != NO_EVENT_PENDING)
+ return;
+
+  // Create the decoder jobs if they have not been created yet.
+  ConfigureAudioDecoderJob();
+  ConfigureVideoDecoderJob();
+
+  // If one of the decoder jobs is not ready, do nothing.
+ if ((HasAudio() && !audio_decoder_job_) ||
+ (HasVideo() && !video_decoder_job_)) {
+ return;
+ }
+
+ audio_finished_ = false;
+ video_finished_ = false;
+ sync_decoder_jobs_ = true;
+ SyncAndStartDecoderJobs();
+}
+
+void MediaSourcePlayer::DemuxerReady(
+ const MediaPlayerHostMsg_DemuxerReady_Params& params) {
+ duration_ = base::TimeDelta::FromMilliseconds(params.duration_ms);
+ clock_.SetDuration(duration_);
+
+ audio_codec_ = params.audio_codec;
+ num_channels_ = params.audio_channels;
+ sampling_rate_ = params.audio_sampling_rate;
+ is_audio_encrypted_ = params.is_audio_encrypted;
+ audio_extra_data_ = params.audio_extra_data;
+ if (HasAudio()) {
+ DCHECK_GT(num_channels_, 0);
+ audio_timestamp_helper_.reset(new AudioTimestampHelper(sampling_rate_));
+ audio_timestamp_helper_->SetBaseTimestamp(GetCurrentTime());
+ } else {
+ audio_timestamp_helper_.reset();
+ }
+
+ video_codec_ = params.video_codec;
+ width_ = params.video_size.width();
+ height_ = params.video_size.height();
+ is_video_encrypted_ = params.is_video_encrypted;
+
+ OnMediaMetadataChanged(duration_, width_, height_, true);
+
+ if (pending_event_ & CONFIG_CHANGE_EVENT_PENDING) {
+ if (reconfig_audio_decoder_)
+ ConfigureAudioDecoderJob();
+
+ // If there is a pending surface change, we can merge it with the config
+ // change.
+ if (reconfig_video_decoder_) {
+ pending_event_ &= ~SURFACE_CHANGE_EVENT_PENDING;
+ ConfigureVideoDecoderJob();
+ }
+ pending_event_ &= ~CONFIG_CHANGE_EVENT_PENDING;
+ if (playing_)
+ StartInternal();
+ }
+}
+
+void MediaSourcePlayer::ReadFromDemuxerAck(
+ const MediaPlayerHostMsg_ReadFromDemuxerAck_Params& params) {
+ DCHECK_LT(0u, params.access_units.size());
+ if (params.type == DemuxerStream::AUDIO)
+ waiting_for_audio_data_ = false;
+ else
+ waiting_for_video_data_ = false;
+
+ // If there is a pending seek request, ignore the data from the chunk demuxer.
+ // The data will be requested later when OnSeekRequestAck() is called.
+ if (pending_event_ & SEEK_EVENT_PENDING)
+ return;
+
+ if (params.type == DemuxerStream::AUDIO) {
+ DCHECK_EQ(0u, audio_access_unit_index_);
+ received_audio_ = params;
+ } else {
+ DCHECK_EQ(0u, video_access_unit_index_);
+ received_video_ = params;
+ }
+
+ if (pending_event_ != NO_EVENT_PENDING || !playing_)
+ return;
+
+ if (sync_decoder_jobs_) {
+ SyncAndStartDecoderJobs();
+ return;
+ }
+
+ if (params.type == DemuxerStream::AUDIO)
+ DecodeMoreAudio();
+ else
+ DecodeMoreVideo();
+}
+
+void MediaSourcePlayer::DurationChanged(const base::TimeDelta& duration) {
+ duration_ = duration;
+ clock_.SetDuration(duration_);
+}
+
+void MediaSourcePlayer::SetDrmBridge(MediaDrmBridge* drm_bridge) {
+  // Currently we don't support DRM changes in the middle of playback, even
+  // if the player is paused.
+ // TODO(qinmin): support DRM change after playback has started.
+ // http://crbug.com/253792.
+ if (GetCurrentTime() > base::TimeDelta()) {
+ LOG(INFO) << "Setting DRM bridge after play back has started. "
+ << "This is not well supported!";
+ }
+
+ drm_bridge_ = drm_bridge;
+
+ if (playing_)
+ StartInternal();
+}
+
+void MediaSourcePlayer::OnSeekRequestAck(unsigned seek_request_id) {
+ DVLOG(1) << "OnSeekRequestAck(" << seek_request_id << ")";
+ // Do nothing until the most recent seek request is processed.
+ if (seek_request_id_ != seek_request_id)
+ return;
+ pending_event_ &= ~SEEK_EVENT_PENDING;
+ OnSeekComplete();
+ ProcessPendingEvents();
+}
+
+void MediaSourcePlayer::UpdateTimestamps(
+ const base::TimeDelta& presentation_timestamp, size_t audio_output_bytes) {
+ if (audio_output_bytes > 0) {
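+    // Convert output bytes to frames: bytes / (bytes per sample * channels).
+    // For example, assuming kBytesPerAudioOutputSample is 2 (16-bit PCM),
+    // 4096 bytes of stereo output correspond to 4096 / (2 * 2) = 1024 frames.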
+ audio_timestamp_helper_->AddFrames(
+ audio_output_bytes / (kBytesPerAudioOutputSample * num_channels_));
+ clock_.SetMaxTime(audio_timestamp_helper_->GetTimestamp());
+ } else {
+ clock_.SetMaxTime(presentation_timestamp);
+ }
+
+ OnTimeUpdated();
+}
+
+void MediaSourcePlayer::ProcessPendingEvents() {
+  // Wait for all the decoder jobs to finish before processing pending events.
+ if ((audio_decoder_job_ && audio_decoder_job_->is_decoding()) ||
+ (video_decoder_job_ && video_decoder_job_->is_decoding())) {
+ return;
+ }
+
+ if (pending_event_ & SEEK_EVENT_PENDING) {
+ ClearDecodingData();
+ manager()->OnMediaSeekRequest(
+ player_id(), GetCurrentTime(), ++seek_request_id_);
+ return;
+ }
+
+ start_time_ticks_ = base::TimeTicks();
+ if (pending_event_ & CONFIG_CHANGE_EVENT_PENDING) {
+ DCHECK(reconfig_audio_decoder_ || reconfig_video_decoder_);
+ manager()->OnMediaConfigRequest(player_id());
+ return;
+ }
+
+ if (pending_event_ & SURFACE_CHANGE_EVENT_PENDING) {
+ video_decoder_job_.reset();
+ ConfigureVideoDecoderJob();
+ pending_event_ &= ~SURFACE_CHANGE_EVENT_PENDING;
+ }
+
+ if (playing_)
+ StartInternal();
+}
+
+void MediaSourcePlayer::MediaDecoderCallback(
+ bool is_audio, MediaDecoderJob::DecodeStatus decode_status,
+ const base::TimeDelta& presentation_timestamp, size_t audio_output_bytes) {
+ if (is_audio && audio_decoder_job_)
+ audio_decoder_job_->OnDecodeCompleted();
+ if (!is_audio && video_decoder_job_)
+ video_decoder_job_->OnDecodeCompleted();
+
+ if (is_audio)
+ decoder_starvation_callback_.Cancel();
+
+ if (decode_status == MediaDecoderJob::DECODE_FAILED) {
+ Release();
+ OnMediaError(MEDIA_ERROR_DECODE);
+ return;
+ }
+
+  // If input EOS is reached, there is no need to request new data.
+ if (decode_status != MediaDecoderJob::DECODE_TRY_ENQUEUE_INPUT_AGAIN_LATER &&
+ decode_status != MediaDecoderJob::DECODE_INPUT_END_OF_STREAM) {
+ if (is_audio)
+ audio_access_unit_index_++;
+ else
+ video_access_unit_index_++;
+ }
+
+ if (pending_event_ != NO_EVENT_PENDING) {
+ ProcessPendingEvents();
+ return;
+ }
+
+ if (decode_status == MediaDecoderJob::DECODE_SUCCEEDED &&
+ (is_audio || !HasAudio())) {
+ UpdateTimestamps(presentation_timestamp, audio_output_bytes);
+ }
+
+ if (decode_status == MediaDecoderJob::DECODE_OUTPUT_END_OF_STREAM) {
+ PlaybackCompleted(is_audio);
+ return;
+ }
+
+ if (!playing_) {
+ if (is_audio || !HasAudio())
+ clock_.Pause();
+ return;
+ }
+
+ if (sync_decoder_jobs_) {
+ SyncAndStartDecoderJobs();
+ return;
+ }
+
+ base::TimeDelta current_timestamp = GetCurrentTime();
+ if (is_audio) {
+ if (decode_status == MediaDecoderJob::DECODE_SUCCEEDED) {
+ base::TimeDelta timeout =
+ audio_timestamp_helper_->GetTimestamp() - current_timestamp;
+ StartStarvationCallback(timeout);
+ }
+ if (!HasAudioData())
+ RequestAudioData();
+ else
+ DecodeMoreAudio();
+ return;
+ }
+
+ if (!HasAudio() && decode_status == MediaDecoderJob::DECODE_SUCCEEDED) {
+ DCHECK(current_timestamp <= presentation_timestamp);
+ // For video only streams, fps can be estimated from the difference
+ // between the previous and current presentation timestamps. The
+ // previous presentation timestamp is equal to current_timestamp.
+ // TODO(qinmin): determine whether 2 is a good coefficient for estimating
+ // video frame timeout.
+ StartStarvationCallback(2 * (presentation_timestamp - current_timestamp));
+ }
+ if (!HasVideoData())
+ RequestVideoData();
+ else
+ DecodeMoreVideo();
+}
+
+void MediaSourcePlayer::DecodeMoreAudio() {
+ DCHECK(!audio_decoder_job_->is_decoding());
+ DCHECK(HasAudioData());
+
+ if (DemuxerStream::kConfigChanged ==
+ received_audio_.access_units[audio_access_unit_index_].status) {
+ // Wait for demuxer ready message.
+ reconfig_audio_decoder_ = true;
+ pending_event_ |= CONFIG_CHANGE_EVENT_PENDING;
+ received_audio_ = MediaPlayerHostMsg_ReadFromDemuxerAck_Params();
+ audio_access_unit_index_ = 0;
+ ProcessPendingEvents();
+ return;
+ }
+
+ audio_decoder_job_->Decode(
+ received_audio_.access_units[audio_access_unit_index_],
+ start_time_ticks_, start_presentation_timestamp_,
+ base::Bind(&MediaSourcePlayer::MediaDecoderCallback,
+ weak_this_.GetWeakPtr(), true));
+}
+
+void MediaSourcePlayer::DecodeMoreVideo() {
+ DVLOG(1) << "DecodeMoreVideo()";
+ DCHECK(!video_decoder_job_->is_decoding());
+ DCHECK(HasVideoData());
+
+ if (DemuxerStream::kConfigChanged ==
+ received_video_.access_units[video_access_unit_index_].status) {
+ // Wait for demuxer ready message.
+ reconfig_video_decoder_ = true;
+ pending_event_ |= CONFIG_CHANGE_EVENT_PENDING;
+ received_video_ = MediaPlayerHostMsg_ReadFromDemuxerAck_Params();
+ video_access_unit_index_ = 0;
+ ProcessPendingEvents();
+ return;
+ }
+
+ DVLOG(3) << "VideoDecoderJob::Decode(" << video_access_unit_index_ << ", "
+ << start_time_ticks_.ToInternalValue() << ", "
+ << start_presentation_timestamp_.InMilliseconds() << ")";
+ video_decoder_job_->Decode(
+ received_video_.access_units[video_access_unit_index_],
+ start_time_ticks_, start_presentation_timestamp_,
+ base::Bind(&MediaSourcePlayer::MediaDecoderCallback,
+ weak_this_.GetWeakPtr(), false));
+}
+
+void MediaSourcePlayer::PlaybackCompleted(bool is_audio) {
+ if (is_audio)
+ audio_finished_ = true;
+ else
+ video_finished_ = true;
+
+ if ((!HasAudio() || audio_finished_) && (!HasVideo() || video_finished_)) {
+ playing_ = false;
+ clock_.Pause();
+ start_time_ticks_ = base::TimeTicks();
+ OnPlaybackComplete();
+ }
+}
+
+void MediaSourcePlayer::ClearDecodingData() {
+ DVLOG(1) << "ClearDecodingData()";
+ if (audio_decoder_job_)
+ audio_decoder_job_->Flush();
+ if (video_decoder_job_)
+ video_decoder_job_->Flush();
+ start_time_ticks_ = base::TimeTicks();
+ received_audio_ = MediaPlayerHostMsg_ReadFromDemuxerAck_Params();
+ received_video_ = MediaPlayerHostMsg_ReadFromDemuxerAck_Params();
+ audio_access_unit_index_ = 0;
+ video_access_unit_index_ = 0;
+ waiting_for_audio_data_ = false;
+ waiting_for_video_data_ = false;
+}
+
+bool MediaSourcePlayer::HasVideo() {
+ return kUnknownVideoCodec != video_codec_;
+}
+
+bool MediaSourcePlayer::HasAudio() {
+ return kUnknownAudioCodec != audio_codec_;
+}
+
+void MediaSourcePlayer::ConfigureAudioDecoderJob() {
+ if (!HasAudio()) {
+ audio_decoder_job_.reset();
+ return;
+ }
+
+ // Create audio decoder job only if config changes.
+ if (audio_decoder_job_ && !reconfig_audio_decoder_)
+ return;
+
+  base::android::ScopedJavaLocalRef<jobject> media_crypto;
+ if (is_audio_encrypted_) {
+ if (drm_bridge_) {
+      media_crypto = drm_bridge_->GetMediaCrypto();
+ // TODO(qinmin): currently we assume MediaCrypto is available whenever
+ // MediaDrmBridge is constructed. This will change if we want to support
+      // more general use cases of EME.
+      DCHECK(!media_crypto.is_null());
+ } else {
+ // Don't create the decoder job if |drm_bridge_| is not set,
+ // so StartInternal() will not proceed.
+ LOG(INFO) << "MediaDrmBridge is not available when creating decoder "
+ << "for encrypted audio stream.";
+ return;
+ }
+ }
+
+ audio_decoder_job_.reset(AudioDecoderJob::Create(
+ audio_codec_, sampling_rate_, num_channels_, &audio_extra_data_[0],
+      audio_extra_data_.size(), media_crypto.obj()));
+
+ if (audio_decoder_job_) {
+ SetVolumeInternal();
+ reconfig_audio_decoder_ = false;
+ }
+}
+
+void MediaSourcePlayer::ConfigureVideoDecoderJob() {
+ if (!HasVideo() || surface_.IsEmpty()) {
+ video_decoder_job_.reset();
+ return;
+ }
+
+ // Create video decoder job only if config changes.
+ if (video_decoder_job_ && !reconfig_video_decoder_)
+ return;
+
+ base::android::ScopedJavaLocalRef<jobject> media_crypto;
+ if (is_video_encrypted_) {
+ if (drm_bridge_) {
+ media_crypto = drm_bridge_->GetMediaCrypto();
+ DCHECK(!media_crypto.is_null());
+ } else {
+ LOG(INFO) << "MediaDrmBridge is not available when creating decoder "
+ << "for encrypted video stream.";
+ return;
+ }
+ }
+
+ // Release the old VideoDecoderJob first so the surface can get released.
+  // Android does not allow two MediaCodec instances to use the same surface.
+ video_decoder_job_.reset();
+ // Create the new VideoDecoderJob.
+ video_decoder_job_.reset(VideoDecoderJob::Create(
+ video_codec_, gfx::Size(width_, height_), surface_.j_surface().obj(),
+ media_crypto.obj()));
+ if (video_decoder_job_)
+ reconfig_video_decoder_ = false;
+
+  // Inform the fullscreen view that the player is ready.
+ // TODO(qinmin): refactor MediaPlayerBridge so that we have a better way
+ // to inform ContentVideoView.
+ OnMediaMetadataChanged(duration_, width_, height_, true);
+}
+
+void MediaSourcePlayer::OnDecoderStarved() {
+ sync_decoder_jobs_ = true;
+}
+
+void MediaSourcePlayer::StartStarvationCallback(
+ const base::TimeDelta& timeout) {
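+  // Reset() cancels any previously posted callback, so at most one
+  // starvation timeout is outstanding at a time.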
+ decoder_starvation_callback_.Reset(
+ base::Bind(&MediaSourcePlayer::OnDecoderStarved,
+ weak_this_.GetWeakPtr()));
+ base::MessageLoop::current()->PostDelayedTask(
+ FROM_HERE, decoder_starvation_callback_.callback(), timeout);
+}
+
+void MediaSourcePlayer::SyncAndStartDecoderJobs() {
+ // For streams with both audio and video, send the request for video too.
+ // However, don't wait for the response so that we won't have lots of
+ // noticeable pauses in the audio. Video will sync with audio by itself.
+ if (HasVideo() && !HasVideoData()) {
+ RequestVideoData();
+ if (!HasAudio())
+ return;
+ }
+ if (HasAudio() && !HasAudioData()) {
+ RequestAudioData();
+ return;
+ }
+ start_time_ticks_ = base::TimeTicks::Now();
+ start_presentation_timestamp_ = GetCurrentTime();
+ if (!clock_.IsPlaying())
+ clock_.Play();
+ if (HasAudioData() && !audio_decoder_job_->is_decoding())
+ DecodeMoreAudio();
+ if (HasVideoData() && !video_decoder_job_->is_decoding())
+ DecodeMoreVideo();
+ sync_decoder_jobs_ = false;
+}
+
+void MediaSourcePlayer::RequestAudioData() {
+ DVLOG(2) << "RequestAudioData()";
+ DCHECK(HasAudio());
+
+ if (waiting_for_audio_data_)
+ return;
+
+ manager()->OnReadFromDemuxer(player_id(), DemuxerStream::AUDIO);
+ received_audio_ = MediaPlayerHostMsg_ReadFromDemuxerAck_Params();
+ audio_access_unit_index_ = 0;
+ waiting_for_audio_data_ = true;
+}
+
+void MediaSourcePlayer::RequestVideoData() {
+ DVLOG(2) << "RequestVideoData()";
+ DCHECK(HasVideo());
+ if (waiting_for_video_data_)
+ return;
+
+ manager()->OnReadFromDemuxer(player_id(), DemuxerStream::VIDEO);
+ received_video_ = MediaPlayerHostMsg_ReadFromDemuxerAck_Params();
+ video_access_unit_index_ = 0;
+ waiting_for_video_data_ = true;
+}
+
+bool MediaSourcePlayer::HasAudioData() const {
+ return audio_access_unit_index_ < received_audio_.access_units.size();
+}
+
+bool MediaSourcePlayer::HasVideoData() const {
+ return video_access_unit_index_ < received_video_.access_units.size();
+}
+
+void MediaSourcePlayer::SetVolumeInternal() {
+ if (audio_decoder_job_ && volume_ >= 0)
+    audio_decoder_job_->SetVolume(volume_);
+}
+
+} // namespace media
diff --git a/chromium/media/base/android/media_source_player.h b/chromium/media/base/android/media_source_player.h
new file mode 100644
index 00000000000..05fd224e04e
--- /dev/null
+++ b/chromium/media/base/android/media_source_player.h
@@ -0,0 +1,313 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_ANDROID_MEDIA_SOURCE_PLAYER_H_
+#define MEDIA_BASE_ANDROID_MEDIA_SOURCE_PLAYER_H_
+
+#include <jni.h>
+#include <map>
+#include <string>
+#include <vector>
+
+#include "base/android/scoped_java_ref.h"
+#include "base/callback.h"
+#include "base/cancelable_callback.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
+#include "base/threading/thread.h"
+#include "base/time/default_tick_clock.h"
+#include "base/time/time.h"
+#include "media/base/android/demuxer_stream_player_params.h"
+#include "media/base/android/media_codec_bridge.h"
+#include "media/base/android/media_player_android.h"
+#include "media/base/clock.h"
+#include "media/base/media_export.h"
+
+namespace base {
+class MessageLoopProxy;
+}
+
+namespace media {
+
+class AudioDecoderJob;
+class AudioTimestampHelper;
+class VideoDecoderJob;
+
+// Class for managing all the decoding tasks. Each decoding task will be posted
+// onto the same thread. The thread will be stopped once Stop() is called.
+class MediaDecoderJob {
+ public:
+ enum DecodeStatus {
+ DECODE_SUCCEEDED,
+ DECODE_TRY_ENQUEUE_INPUT_AGAIN_LATER,
+ DECODE_TRY_DEQUEUE_OUTPUT_AGAIN_LATER,
+ DECODE_FORMAT_CHANGED,
+ DECODE_INPUT_END_OF_STREAM,
+ DECODE_OUTPUT_END_OF_STREAM,
+ DECODE_FAILED,
+ };
+
+ virtual ~MediaDecoderJob();
+
+  // Callback when a decoder job finishes its work. Args: the decode status,
+  // the presentation timestamp, and the number of audio bytes output.
+ typedef base::Callback<void(DecodeStatus, const base::TimeDelta&,
+ size_t)> DecoderCallback;
+
+ // Called by MediaSourcePlayer to decode some data.
+ void Decode(const AccessUnit& unit,
+ const base::TimeTicks& start_time_ticks,
+ const base::TimeDelta& start_presentation_timestamp,
+ const MediaDecoderJob::DecoderCallback& callback);
+
+ // Flush the decoder.
+ void Flush();
+
+ // Causes this instance to be deleted on the thread it is bound to.
+ void Release();
+
+ // Called on the UI thread to indicate that one decode cycle has completed.
+ void OnDecodeCompleted();
+
+ bool is_decoding() const { return is_decoding_; }
+
+ protected:
+ MediaDecoderJob(const scoped_refptr<base::MessageLoopProxy>& decoder_loop,
+ MediaCodecBridge* media_codec_bridge,
+ bool is_audio);
+
+ // Release the output buffer and render it.
+ void ReleaseOutputBuffer(
+ int outputBufferIndex, size_t size,
+ const base::TimeDelta& presentation_timestamp,
+ const MediaDecoderJob::DecoderCallback& callback, DecodeStatus status);
+
+ DecodeStatus QueueInputBuffer(const AccessUnit& unit);
+
+  // Helper function to decode data on |decoder_loop_|. |unit| contains all
+  // the data to be decoded. |start_time_ticks| and
+  // |start_presentation_timestamp| represent the system time and the
+  // presentation timestamp when the first frame is rendered. We use this
+  // information to estimate when the current frame should be rendered.
+  // If |needs_flush| is true, the codec needs to be flushed at the beginning
+  // of this call.
+ void DecodeInternal(const AccessUnit& unit,
+ const base::TimeTicks& start_time_ticks,
+ const base::TimeDelta& start_presentation_timestamp,
+ bool needs_flush,
+ const MediaDecoderJob::DecoderCallback& callback);
+
+ // The UI message loop where callbacks should be dispatched.
+ scoped_refptr<base::MessageLoopProxy> ui_loop_;
+
+ // The message loop that decoder job runs on.
+ scoped_refptr<base::MessageLoopProxy> decoder_loop_;
+
+ // The media codec bridge used for decoding.
+ scoped_ptr<MediaCodecBridge> media_codec_bridge_;
+
+ // Whether the decoder needs to be flushed.
+ bool needs_flush_;
+
+ // Whether this is an audio decoder.
+ bool is_audio_;
+
+ // Whether input EOS is encountered.
+ bool input_eos_encountered_;
+
+  // Weak pointer passed to media decoder jobs for callbacks. It is bound to
+  // the decoder thread.
+ base::WeakPtrFactory<MediaDecoderJob> weak_this_;
+
+ // Whether the decoder is actively decoding data.
+ bool is_decoding_;
+};
+
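+// Deleter used by scoped_ptr<AudioDecoderJob/VideoDecoderJob> below. It
+// routes destruction through MediaDecoderJob::Release() so that the job is
+// deleted on the thread it is bound to.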
+struct DecoderJobDeleter {
+ inline void operator()(MediaDecoderJob* ptr) const { ptr->Release(); }
+};
+
+// This class handles media source extensions on Android. It uses Android
+// MediaCodec to decode audio and video streams in two separate threads.
+// IPC is being used to send data from the render process to this object.
+// TODO(qinmin): use shared memory to send data between processes.
+class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid {
+ public:
+ // Construct a MediaSourcePlayer object with all the needed media player
+ // callbacks.
+ MediaSourcePlayer(int player_id, MediaPlayerManager* manager);
+ virtual ~MediaSourcePlayer();
+
+ // MediaPlayerAndroid implementation.
+ virtual void SetVideoSurface(gfx::ScopedJavaSurface surface) OVERRIDE;
+ virtual void Start() OVERRIDE;
+ virtual void Pause() OVERRIDE;
+ virtual void SeekTo(base::TimeDelta timestamp) OVERRIDE;
+ virtual void Release() OVERRIDE;
+ virtual void SetVolume(double volume) OVERRIDE;
+ virtual int GetVideoWidth() OVERRIDE;
+ virtual int GetVideoHeight() OVERRIDE;
+ virtual base::TimeDelta GetCurrentTime() OVERRIDE;
+ virtual base::TimeDelta GetDuration() OVERRIDE;
+ virtual bool IsPlaying() OVERRIDE;
+ virtual bool CanPause() OVERRIDE;
+ virtual bool CanSeekForward() OVERRIDE;
+ virtual bool CanSeekBackward() OVERRIDE;
+ virtual bool IsPlayerReady() OVERRIDE;
+ virtual void OnSeekRequestAck(unsigned seek_request_id) OVERRIDE;
+ virtual void DemuxerReady(
+ const MediaPlayerHostMsg_DemuxerReady_Params& params) OVERRIDE;
+ virtual void ReadFromDemuxerAck(
+ const MediaPlayerHostMsg_ReadFromDemuxerAck_Params& params) OVERRIDE;
+ virtual void DurationChanged(const base::TimeDelta& duration) OVERRIDE;
+ virtual void SetDrmBridge(MediaDrmBridge* drm_bridge) OVERRIDE;
+
+ private:
+ // Update the current timestamp.
+ void UpdateTimestamps(const base::TimeDelta& presentation_timestamp,
+ size_t audio_output_bytes);
+
+ // Helper function for starting media playback.
+ void StartInternal();
+
+ // Playback is completed for one channel.
+ void PlaybackCompleted(bool is_audio);
+
+ // Called when the decoder finishes its task.
+ void MediaDecoderCallback(
+ bool is_audio, MediaDecoderJob::DecodeStatus decode_status,
+ const base::TimeDelta& presentation_timestamp,
+ size_t audio_output_bytes);
+
+ // Handle pending events when all the decoder jobs finished.
+ void ProcessPendingEvents();
+
+ // Helper method to configure the decoder jobs.
+ void ConfigureVideoDecoderJob();
+ void ConfigureAudioDecoderJob();
+
+  // Flush the decoders and clear all the data that is waiting to be decoded.
+ void ClearDecodingData();
+
+  // Called to decode more data.
+ void DecodeMoreAudio();
+ void DecodeMoreVideo();
+
+  // Functions to check whether audio/video is present.
+ bool HasVideo();
+ bool HasAudio();
+
+ // Determine seekability based on duration.
+ bool Seekable();
+
+ // Called when the |decoder_starvation_callback_| times out.
+ void OnDecoderStarved();
+
+ // Starts the |decoder_starvation_callback_| task with the timeout value.
+ void StartStarvationCallback(const base::TimeDelta& timeout);
+
+  // Called to sync decoder jobs. This call requests data from the chunk
+  // demuxer first. Then it updates |start_time_ticks_| and
+ // |start_presentation_timestamp_| so that video can resync with audio.
+ void SyncAndStartDecoderJobs();
+
+  // Functions that send IPC requests to the renderer process for more
+  // audio/video data. They set the corresponding |waiting_for_*_data_| flag
+  // so that duplicate requests are not issued.
+ void RequestAudioData();
+ void RequestVideoData();
+
+ // Check whether audio or video data is available for decoders to consume.
+ bool HasAudioData() const;
+ bool HasVideoData() const;
+
+ // Helper function to set the volume.
+ void SetVolumeInternal();
+
+ enum PendingEventFlags {
+ NO_EVENT_PENDING = 0,
+ SEEK_EVENT_PENDING = 1 << 0,
+ SURFACE_CHANGE_EVENT_PENDING = 1 << 1,
+ CONFIG_CHANGE_EVENT_PENDING = 1 << 2,
+ };
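+  // These flags are combined into a bitmask, e.g.:
+  //   pending_event_ |= SEEK_EVENT_PENDING;         // set
+  //   if (pending_event_ & SEEK_EVENT_PENDING) ...  // test
+  //   pending_event_ &= ~SEEK_EVENT_PENDING;        // clear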
+  // Pending events that the player needs to handle.
+ unsigned pending_event_;
+
+ // ID to keep track of whether all the seek requests are acked.
+ unsigned seek_request_id_;
+
+ // Stats about the media.
+ base::TimeDelta duration_;
+ int width_;
+ int height_;
+ AudioCodec audio_codec_;
+ VideoCodec video_codec_;
+ int num_channels_;
+ int sampling_rate_;
+ // TODO(xhwang/qinmin): Add |video_extra_data_|.
+ std::vector<uint8> audio_extra_data_;
+ bool audio_finished_;
+ bool video_finished_;
+ bool playing_;
+ bool is_audio_encrypted_;
+ bool is_video_encrypted_;
+ double volume_;
+
+ // base::TickClock used by |clock_|.
+ base::DefaultTickClock default_tick_clock_;
+
+ // Reference clock. Keeps track of current playback time.
+ Clock clock_;
+
+  // Timestamps for providing simple A/V sync. When we start decoding an
+  // audio chunk, we record its presentation timestamp and the current system
+  // time.
+ // Then we use this information to estimate when the next audio/video frame
+ // should be rendered.
+  // TODO(qinmin): Need to fix the problem if audio/video lags too far behind
+  // due to network or decoding problems.
+ base::TimeTicks start_time_ticks_;
+ base::TimeDelta start_presentation_timestamp_;
+
+ // The surface object currently owned by the player.
+ gfx::ScopedJavaSurface surface_;
+
+ // Decoder jobs
+ scoped_ptr<AudioDecoderJob, DecoderJobDeleter> audio_decoder_job_;
+ scoped_ptr<VideoDecoderJob, DecoderJobDeleter> video_decoder_job_;
+
+ bool reconfig_audio_decoder_;
+ bool reconfig_video_decoder_;
+
+ // These variables keep track of the current decoding data.
+  // TODO(qinmin): remove these variables when we no longer rely on IPC for
+ // data passing.
+ size_t audio_access_unit_index_;
+ size_t video_access_unit_index_;
+ bool waiting_for_audio_data_;
+ bool waiting_for_video_data_;
+ MediaPlayerHostMsg_ReadFromDemuxerAck_Params received_audio_;
+ MediaPlayerHostMsg_ReadFromDemuxerAck_Params received_video_;
+
+ // A cancelable task that is posted when the audio decoder starts requesting
+ // new data. This callback runs if no data arrives before the timeout period
+ // elapses.
+ base::CancelableClosure decoder_starvation_callback_;
+
+ // Whether the audio and video decoder jobs should resync with each other.
+ bool sync_decoder_jobs_;
+
+ // Object to calculate the current audio timestamp for A/V sync.
+ scoped_ptr<AudioTimestampHelper> audio_timestamp_helper_;
+
+ // Weak pointer passed to media decoder jobs for callbacks.
+ base::WeakPtrFactory<MediaSourcePlayer> weak_this_;
+
+ MediaDrmBridge* drm_bridge_;
+
+ friend class MediaSourcePlayerTest;
+ DISALLOW_COPY_AND_ASSIGN(MediaSourcePlayer);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_ANDROID_MEDIA_SOURCE_PLAYER_H_
diff --git a/chromium/media/base/android/media_source_player_unittest.cc b/chromium/media/base/android/media_source_player_unittest.cc
new file mode 100644
index 00000000000..40d28e43f56
--- /dev/null
+++ b/chromium/media/base/android/media_source_player_unittest.cc
@@ -0,0 +1,462 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <string>
+
+#include "base/basictypes.h"
+#include "base/memory/scoped_ptr.h"
+#include "media/base/android/media_codec_bridge.h"
+#include "media/base/android/media_player_manager.h"
+#include "media/base/android/media_source_player.h"
+#include "media/base/decoder_buffer.h"
+#include "media/base/test_data_util.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "ui/gl/android/surface_texture_bridge.h"
+
+namespace media {
+
+static const int kDefaultDurationInMs = 10000;
+
+// Mock of MediaPlayerManager for testing purposes.
+class MockMediaPlayerManager : public MediaPlayerManager {
+ public:
+ MockMediaPlayerManager() : num_requests_(0), last_seek_request_id_(0) {}
+  virtual ~MockMediaPlayerManager() {}
+
+ // MediaPlayerManager implementation.
+ virtual void RequestMediaResources(int player_id) OVERRIDE {}
+ virtual void ReleaseMediaResources(int player_id) OVERRIDE {}
+ virtual MediaResourceGetter* GetMediaResourceGetter() OVERRIDE {
+ return NULL;
+ }
+ virtual void OnTimeUpdate(int player_id,
+ base::TimeDelta current_time) OVERRIDE {}
+ virtual void OnMediaMetadataChanged(
+ int player_id, base::TimeDelta duration, int width, int height,
+ bool success) OVERRIDE {}
+ virtual void OnPlaybackComplete(int player_id) OVERRIDE {
+ if (message_loop_.is_running())
+ message_loop_.Quit();
+ }
+ virtual void OnMediaInterrupted(int player_id) OVERRIDE {}
+ virtual void OnBufferingUpdate(int player_id, int percentage) OVERRIDE {}
+ virtual void OnSeekComplete(int player_id,
+ base::TimeDelta current_time) OVERRIDE {}
+ virtual void OnError(int player_id, int error) OVERRIDE {}
+ virtual void OnVideoSizeChanged(int player_id, int width,
+ int height) OVERRIDE {}
+ virtual MediaPlayerAndroid* GetFullscreenPlayer() OVERRIDE { return NULL; }
+ virtual MediaPlayerAndroid* GetPlayer(int player_id) OVERRIDE { return NULL; }
+ virtual void DestroyAllMediaPlayers() OVERRIDE {}
+ virtual void OnReadFromDemuxer(int player_id,
+ media::DemuxerStream::Type type) OVERRIDE {
+ num_requests_++;
+ if (message_loop_.is_running())
+ message_loop_.Quit();
+ }
+ virtual void OnMediaSeekRequest(int player_id, base::TimeDelta time_to_seek,
+ unsigned seek_request_id) OVERRIDE {
+ last_seek_request_id_ = seek_request_id;
+ }
+ virtual void OnMediaConfigRequest(int player_id) OVERRIDE {}
+ virtual media::MediaDrmBridge* GetDrmBridge(int media_keys_id) OVERRIDE {
+ return NULL;
+ }
+ virtual void OnProtectedSurfaceRequested(int player_id) OVERRIDE {}
+ virtual void OnKeyAdded(int key_id,
+ const std::string& session_id) OVERRIDE {}
+ virtual void OnKeyError(int key_id,
+ const std::string& session_id,
+ media::MediaKeys::KeyError error_code,
+ int system_code) OVERRIDE {}
+ virtual void OnKeyMessage(int key_id,
+ const std::string& session_id,
+ const std::vector<uint8>& message,
+ const std::string& destination_url) OVERRIDE {}
+
+ int num_requests() const { return num_requests_; }
+ unsigned last_seek_request_id() const { return last_seek_request_id_; }
+ base::MessageLoop* message_loop() { return &message_loop_; }
+
+ private:
+  // The number of requests this object has sent for decoding data.
+ int num_requests_;
+ unsigned last_seek_request_id_;
+ base::MessageLoop message_loop_;
+};
+
+class MediaSourcePlayerTest : public testing::Test {
+ public:
+ MediaSourcePlayerTest() {
+ manager_.reset(new MockMediaPlayerManager());
+ player_.reset(new MediaSourcePlayer(0, manager_.get()));
+ }
+ virtual ~MediaSourcePlayerTest() {}
+
+ protected:
+ // Get the decoder job from the MediaSourcePlayer.
+ MediaDecoderJob* GetMediaDecoderJob(bool is_audio) {
+ if (is_audio) {
+ return reinterpret_cast<MediaDecoderJob*>(
+ player_->audio_decoder_job_.get());
+ }
+ return reinterpret_cast<MediaDecoderJob*>(
+ player_->video_decoder_job_.get());
+ }
+
+ // Starts an audio decoder job.
+ void StartAudioDecoderJob() {
+ MediaPlayerHostMsg_DemuxerReady_Params params;
+ params.audio_codec = kCodecVorbis;
+ params.audio_channels = 2;
+ params.audio_sampling_rate = 44100;
+ params.is_audio_encrypted = false;
+ params.duration_ms = kDefaultDurationInMs;
+ scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile("vorbis-extradata");
+ params.audio_extra_data = std::vector<uint8>(
+ buffer->data(),
+ buffer->data() + buffer->data_size());
+ Start(params);
+ }
+
+ void StartVideoDecoderJob() {
+ MediaPlayerHostMsg_DemuxerReady_Params params;
+ params.video_codec = kCodecVP8;
+ params.video_size = gfx::Size(320, 240);
+ params.is_video_encrypted = false;
+ params.duration_ms = kDefaultDurationInMs;
+ Start(params);
+ }
+
+ // Starts decoding the data.
+ void Start(const MediaPlayerHostMsg_DemuxerReady_Params& params) {
+ player_->DemuxerReady(params);
+ player_->Start();
+ }
+
+ MediaPlayerHostMsg_ReadFromDemuxerAck_Params
+ CreateReadFromDemuxerAckForAudio() {
+ MediaPlayerHostMsg_ReadFromDemuxerAck_Params ack_params;
+ ack_params.type = DemuxerStream::AUDIO;
+ ack_params.access_units.resize(1);
+ ack_params.access_units[0].status = DemuxerStream::kOk;
+ scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile("vorbis-packet-0");
+ ack_params.access_units[0].data = std::vector<uint8>(
+ buffer->data(), buffer->data() + buffer->data_size());
+    // Vorbis needs 4 extra bytes of padding on Android to decode properly.
+    // See NuMediaExtractor.cpp in the Android source code.
+ uint8 padding[4] = { 0xff , 0xff , 0xff , 0xff };
+ ack_params.access_units[0].data.insert(
+ ack_params.access_units[0].data.end(), padding, padding + 4);
+ return ack_params;
+ }
+
+ MediaPlayerHostMsg_ReadFromDemuxerAck_Params
+ CreateReadFromDemuxerAckForVideo() {
+ MediaPlayerHostMsg_ReadFromDemuxerAck_Params ack_params;
+ ack_params.type = DemuxerStream::VIDEO;
+ ack_params.access_units.resize(1);
+ ack_params.access_units[0].status = DemuxerStream::kOk;
+ scoped_refptr<DecoderBuffer> buffer =
+ ReadTestDataFile("vp8-I-frame-320x240");
+ ack_params.access_units[0].data = std::vector<uint8>(
+ buffer->data(), buffer->data() + buffer->data_size());
+ return ack_params;
+ }
+
+ base::TimeTicks StartTimeTicks() {
+ return player_->start_time_ticks_;
+ }
+
+ protected:
+ scoped_ptr<MockMediaPlayerManager> manager_;
+ scoped_ptr<MediaSourcePlayer> player_;
+
+ DISALLOW_COPY_AND_ASSIGN(MediaSourcePlayerTest);
+};
+
+TEST_F(MediaSourcePlayerTest, StartAudioDecoderWithValidConfig) {
+ if (!MediaCodecBridge::IsAvailable())
+ return;
+
+  // Test that an audio decoder job is created when the codec is successfully
+  // started.
+ StartAudioDecoderJob();
+ EXPECT_TRUE(NULL != GetMediaDecoderJob(true));
+ EXPECT_EQ(1, manager_->num_requests());
+}
+
+TEST_F(MediaSourcePlayerTest, StartAudioDecoderWithInvalidConfig) {
+ if (!MediaCodecBridge::IsAvailable())
+ return;
+
+  // Test that no audio decoder job is created when the codec fails to start.
+ MediaPlayerHostMsg_DemuxerReady_Params params;
+ params.audio_codec = kCodecVorbis;
+ params.audio_channels = 2;
+ params.audio_sampling_rate = 44100;
+ params.is_audio_encrypted = false;
+ params.duration_ms = kDefaultDurationInMs;
+ uint8 invalid_codec_data[] = { 0x00, 0xff, 0xff, 0xff, 0xff };
+ params.audio_extra_data.insert(params.audio_extra_data.begin(),
+ invalid_codec_data, invalid_codec_data + 4);
+ Start(params);
+ EXPECT_EQ(NULL, GetMediaDecoderJob(true));
+ EXPECT_EQ(0, manager_->num_requests());
+}
+
+TEST_F(MediaSourcePlayerTest, StartVideoCodecWithValidSurface) {
+ if (!MediaCodecBridge::IsAvailable())
+ return;
+
+  // Test that a video decoder job is created when the surface is valid.
+ scoped_refptr<gfx::SurfaceTextureBridge> surface_texture(
+ new gfx::SurfaceTextureBridge(0));
+ gfx::ScopedJavaSurface surface(surface_texture.get());
+ StartVideoDecoderJob();
+ // Video decoder job will not be created until surface is available.
+ EXPECT_EQ(NULL, GetMediaDecoderJob(false));
+ EXPECT_EQ(0, manager_->num_requests());
+
+ player_->SetVideoSurface(surface.Pass());
+ EXPECT_EQ(1u, manager_->last_seek_request_id());
+ player_->OnSeekRequestAck(manager_->last_seek_request_id());
+ // The decoder job should be ready now.
+ EXPECT_TRUE(NULL != GetMediaDecoderJob(false));
+ EXPECT_EQ(1, manager_->num_requests());
+}
+
+TEST_F(MediaSourcePlayerTest, StartVideoCodecWithInvalidSurface) {
+ if (!MediaCodecBridge::IsAvailable())
+ return;
+
+  // Test that no video decoder job is created when the surface is invalid.
+ scoped_refptr<gfx::SurfaceTextureBridge> surface_texture(
+ new gfx::SurfaceTextureBridge(0));
+ gfx::ScopedJavaSurface surface(surface_texture.get());
+ StartVideoDecoderJob();
+ // Video decoder job will not be created until surface is available.
+ EXPECT_EQ(NULL, GetMediaDecoderJob(false));
+ EXPECT_EQ(0, manager_->num_requests());
+
+ // Release the surface texture.
+ surface_texture = NULL;
+ player_->SetVideoSurface(surface.Pass());
+ EXPECT_EQ(1u, manager_->last_seek_request_id());
+ player_->OnSeekRequestAck(manager_->last_seek_request_id());
+ EXPECT_EQ(NULL, GetMediaDecoderJob(false));
+ EXPECT_EQ(0, manager_->num_requests());
+}
+
+TEST_F(MediaSourcePlayerTest, ReadFromDemuxerAfterSeek) {
+ if (!MediaCodecBridge::IsAvailable())
+ return;
+
+  // Test that the decoder job resends a ReadFromDemuxer request after a seek.
+ StartAudioDecoderJob();
+ EXPECT_TRUE(NULL != GetMediaDecoderJob(true));
+ EXPECT_EQ(1, manager_->num_requests());
+
+ // Initiate a seek
+ player_->SeekTo(base::TimeDelta());
+ EXPECT_EQ(1u, manager_->last_seek_request_id());
+ // Sending back the seek ACK, this should trigger the player to call
+ // OnReadFromDemuxer() again.
+ player_->OnSeekRequestAck(manager_->last_seek_request_id());
+ EXPECT_EQ(2, manager_->num_requests());
+}
+
+TEST_F(MediaSourcePlayerTest, SetSurfaceWhileSeeking) {
+ if (!MediaCodecBridge::IsAvailable())
+ return;
+
+ // Test SetVideoSurface() will not cause an extra seek while the player is
+ // waiting for a seek ACK.
+ scoped_refptr<gfx::SurfaceTextureBridge> surface_texture(
+ new gfx::SurfaceTextureBridge(0));
+ gfx::ScopedJavaSurface surface(surface_texture.get());
+ StartVideoDecoderJob();
+ // Player is still waiting for SetVideoSurface(), so no request is sent.
+ EXPECT_EQ(0, manager_->num_requests());
+ player_->SeekTo(base::TimeDelta());
+ EXPECT_EQ(1u, manager_->last_seek_request_id());
+
+ player_->SetVideoSurface(surface.Pass());
+ EXPECT_TRUE(NULL == GetMediaDecoderJob(false));
+ EXPECT_EQ(1u, manager_->last_seek_request_id());
+
+ // Send the seek ack, player should start requesting data afterwards.
+ player_->OnSeekRequestAck(manager_->last_seek_request_id());
+ EXPECT_TRUE(NULL != GetMediaDecoderJob(false));
+ EXPECT_EQ(1, manager_->num_requests());
+}
+
+TEST_F(MediaSourcePlayerTest, StartAfterSeekFinish) {
+ if (!MediaCodecBridge::IsAvailable())
+ return;
+
+  // Test that the decoder job does not start until all pending seek events
+  // are handled.
+ MediaPlayerHostMsg_DemuxerReady_Params params;
+ params.audio_codec = kCodecVorbis;
+ params.audio_channels = 2;
+ params.audio_sampling_rate = 44100;
+ params.is_audio_encrypted = false;
+ params.duration_ms = kDefaultDurationInMs;
+ player_->DemuxerReady(params);
+ EXPECT_EQ(NULL, GetMediaDecoderJob(true));
+ EXPECT_EQ(0, manager_->num_requests());
+
+ // Initiate a seek
+ player_->SeekTo(base::TimeDelta());
+ EXPECT_EQ(1u, manager_->last_seek_request_id());
+
+ player_->Start();
+ EXPECT_EQ(NULL, GetMediaDecoderJob(true));
+ EXPECT_EQ(0, manager_->num_requests());
+
+ // Sending back the seek ACK.
+ player_->OnSeekRequestAck(manager_->last_seek_request_id());
+ EXPECT_TRUE(NULL != GetMediaDecoderJob(true));
+ EXPECT_EQ(1, manager_->num_requests());
+}
+
+TEST_F(MediaSourcePlayerTest, StartImmediatelyAfterPause) {
+ if (!MediaCodecBridge::IsAvailable())
+ return;
+
+ // Test that if the decoding job is not fully stopped after Pause(),
+  // calling Start() will be a no-op.
+ StartAudioDecoderJob();
+
+ MediaDecoderJob* decoder_job = GetMediaDecoderJob(true);
+ EXPECT_TRUE(NULL != decoder_job);
+ EXPECT_EQ(1, manager_->num_requests());
+ EXPECT_FALSE(GetMediaDecoderJob(true)->is_decoding());
+
+ // Sending data to player.
+ player_->ReadFromDemuxerAck(CreateReadFromDemuxerAckForAudio());
+ EXPECT_TRUE(GetMediaDecoderJob(true)->is_decoding());
+
+ // Decoder job will not immediately stop after Pause() since it is
+ // running on another thread.
+ player_->Pause();
+ EXPECT_TRUE(GetMediaDecoderJob(true)->is_decoding());
+
+ // Nothing happens when calling Start() again.
+ player_->Start();
+ // Verify that Start() will not destroy and recreate the decoder job.
+ EXPECT_EQ(decoder_job, GetMediaDecoderJob(true));
+ EXPECT_EQ(1, manager_->num_requests());
+ EXPECT_TRUE(GetMediaDecoderJob(true)->is_decoding());
+ manager_->message_loop()->Run();
+ // The decoder job should finish and a new request will be sent.
+ EXPECT_EQ(2, manager_->num_requests());
+ EXPECT_FALSE(GetMediaDecoderJob(true)->is_decoding());
+}
+
+TEST_F(MediaSourcePlayerTest, DecoderJobsCannotStartWithoutAudio) {
+ if (!MediaCodecBridge::IsAvailable())
+ return;
+
+  // Test that when Start() is called, the video decoder job waits for the
+  // audio decoder job before starting to decode the data.
+ MediaPlayerHostMsg_DemuxerReady_Params params;
+ params.audio_codec = kCodecVorbis;
+ params.audio_channels = 2;
+ params.audio_sampling_rate = 44100;
+ params.is_audio_encrypted = false;
+ scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile("vorbis-extradata");
+ params.audio_extra_data = std::vector<uint8>(
+ buffer->data(),
+ buffer->data() + buffer->data_size());
+ params.video_codec = kCodecVP8;
+ params.video_size = gfx::Size(320, 240);
+ params.is_video_encrypted = false;
+ params.duration_ms = kDefaultDurationInMs;
+ Start(params);
+ EXPECT_EQ(0, manager_->num_requests());
+
+ scoped_refptr<gfx::SurfaceTextureBridge> surface_texture(
+ new gfx::SurfaceTextureBridge(0));
+ gfx::ScopedJavaSurface surface(surface_texture.get());
+ player_->SetVideoSurface(surface.Pass());
+ EXPECT_EQ(1u, manager_->last_seek_request_id());
+ player_->OnSeekRequestAck(manager_->last_seek_request_id());
+
+ MediaDecoderJob* audio_decoder_job = GetMediaDecoderJob(true);
+ MediaDecoderJob* video_decoder_job = GetMediaDecoderJob(false);
+ EXPECT_EQ(2, manager_->num_requests());
+ EXPECT_FALSE(audio_decoder_job->is_decoding());
+ EXPECT_FALSE(video_decoder_job->is_decoding());
+
+  // Sending video data to the player; the video decoder should not start.
+ player_->ReadFromDemuxerAck(CreateReadFromDemuxerAckForVideo());
+ EXPECT_FALSE(video_decoder_job->is_decoding());
+
+  // Sending audio data to the player; both decoders should start now.
+ player_->ReadFromDemuxerAck(CreateReadFromDemuxerAckForAudio());
+ EXPECT_TRUE(audio_decoder_job->is_decoding());
+ EXPECT_TRUE(video_decoder_job->is_decoding());
+}
+
+// Disabled due to http://crbug.com/266041.
+// TODO(xhwang/qinmin): Fix this test and reenable it.
+TEST_F(MediaSourcePlayerTest,
+ DISABLED_StartTimeTicksResetAfterDecoderUnderruns) {
+ if (!MediaCodecBridge::IsAvailable())
+ return;
+
+  // Test that the start time ticks reset after the decoder job underruns.
+ StartAudioDecoderJob();
+ EXPECT_TRUE(NULL != GetMediaDecoderJob(true));
+ EXPECT_EQ(1, manager_->num_requests());
+ player_->ReadFromDemuxerAck(CreateReadFromDemuxerAckForAudio());
+ EXPECT_TRUE(GetMediaDecoderJob(true)->is_decoding());
+
+ manager_->message_loop()->Run();
+ // The decoder job should finish and a new request will be sent.
+ EXPECT_EQ(2, manager_->num_requests());
+ EXPECT_FALSE(GetMediaDecoderJob(true)->is_decoding());
+ base::TimeTicks previous = StartTimeTicks();
+
+ // Let the decoder timeout and execute the OnDecoderStarved() callback.
+ base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(100));
+ manager_->message_loop()->RunUntilIdle();
+
+ // Send new data to the decoder. This should reset the start time ticks.
+ player_->ReadFromDemuxerAck(CreateReadFromDemuxerAckForAudio());
+ base::TimeTicks current = StartTimeTicks();
+ EXPECT_LE(100.0, (current - previous).InMillisecondsF());
+}
+
+TEST_F(MediaSourcePlayerTest, NoRequestForDataAfterInputEOS) {
+ if (!MediaCodecBridge::IsAvailable())
+ return;
+
+  // Test that MediaSourcePlayer does not request new data after input EOS is
+  // reached.
+ scoped_refptr<gfx::SurfaceTextureBridge> surface_texture(
+ new gfx::SurfaceTextureBridge(0));
+ gfx::ScopedJavaSurface surface(surface_texture.get());
+ player_->SetVideoSurface(surface.Pass());
+ StartVideoDecoderJob();
+ player_->OnSeekRequestAck(manager_->last_seek_request_id());
+ EXPECT_EQ(1, manager_->num_requests());
+ // Send the first input chunk.
+ player_->ReadFromDemuxerAck(CreateReadFromDemuxerAckForVideo());
+ manager_->message_loop()->Run();
+ EXPECT_EQ(2, manager_->num_requests());
+
+ // Send EOS.
+ MediaPlayerHostMsg_ReadFromDemuxerAck_Params ack_params;
+ ack_params.type = DemuxerStream::VIDEO;
+ ack_params.access_units.resize(1);
+ ack_params.access_units[0].status = DemuxerStream::kOk;
+ ack_params.access_units[0].end_of_stream = true;
+ player_->ReadFromDemuxerAck(ack_params);
+ manager_->message_loop()->Run();
+  // No more requests for data should be made.
+ EXPECT_EQ(2, manager_->num_requests());
+}
+
+} // namespace media
diff --git a/chromium/media/base/android/webaudio_media_codec_bridge.cc b/chromium/media/base/android/webaudio_media_codec_bridge.cc
new file mode 100644
index 00000000000..94f059a9a1e
--- /dev/null
+++ b/chromium/media/base/android/webaudio_media_codec_bridge.cc
@@ -0,0 +1,199 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/android/webaudio_media_codec_bridge.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <vector>
+
+#include "base/android/jni_android.h"
+#include "base/android/jni_array.h"
+#include "base/android/jni_string.h"
+#include "base/basictypes.h"
+#include "base/logging.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/stl_util.h"
+#include "jni/WebAudioMediaCodecBridge_jni.h"
+#include "media/base/android/webaudio_media_codec_info.h"
+
+
+using base::android::AttachCurrentThread;
+
+namespace media {
+
+void WebAudioMediaCodecBridge::RunWebAudioMediaCodec(
+ base::SharedMemoryHandle encoded_audio_handle,
+ base::FileDescriptor pcm_output,
+ uint32_t data_size) {
+ WebAudioMediaCodecBridge bridge(encoded_audio_handle, pcm_output, data_size);
+
+ bridge.DecodeInMemoryAudioFile();
+}
+
+WebAudioMediaCodecBridge::WebAudioMediaCodecBridge(
+ base::SharedMemoryHandle encoded_audio_handle,
+ base::FileDescriptor pcm_output,
+ uint32_t data_size)
+ : encoded_audio_handle_(encoded_audio_handle),
+ pcm_output_(pcm_output.fd),
+ data_size_(data_size) {
+ DVLOG(1) << "WebAudioMediaCodecBridge start **********************"
+ << " output fd = " << pcm_output.fd;
+}
+
+WebAudioMediaCodecBridge::~WebAudioMediaCodecBridge() {
+ if (close(pcm_output_)) {
+ DVLOG(1) << "Couldn't close output fd " << pcm_output_
+ << ": " << strerror(errno);
+ }
+}
+
+int WebAudioMediaCodecBridge::SaveEncodedAudioToFile(
+ JNIEnv* env,
+ jobject context) {
+ // Create a temporary file where we can save the encoded audio data.
+ std::string temporaryFile =
+ base::android::ConvertJavaStringToUTF8(
+ env,
+ Java_WebAudioMediaCodecBridge_CreateTempFile(env, context).obj());
+
+  // Open the file and unlink it, so that it will actually be removed
+  // when we close the file.
+ int fd = open(temporaryFile.c_str(), O_RDWR);
+ if (unlink(temporaryFile.c_str())) {
+ VLOG(0) << "Couldn't unlink temp file " << temporaryFile
+ << ": " << strerror(errno);
+ }
+
+ if (fd < 0) {
+ return -1;
+ }
+
+ // Create a local mapping of the shared memory containing the
+ // encoded audio data, and save the contents to the temporary file.
+ base::SharedMemory encoded_data(encoded_audio_handle_, true);
+
+ if (!encoded_data.Map(data_size_)) {
+ VLOG(0) << "Unable to map shared memory!";
+ return -1;
+ }
+
+ if (static_cast<uint32_t>(write(fd, encoded_data.memory(), data_size_))
+ != data_size_) {
+ VLOG(0) << "Failed to write all audio data to temp file!";
+ return -1;
+ }
+
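+  // Rewind so the decoder will read the data just written from the start.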
+ lseek(fd, 0, SEEK_SET);
+
+ return fd;
+}
+
+bool WebAudioMediaCodecBridge::DecodeInMemoryAudioFile() {
+ JNIEnv* env = AttachCurrentThread();
+ CHECK(env);
+
+ jobject context = base::android::GetApplicationContext();
+
+ int sourceFd = SaveEncodedAudioToFile(env, context);
+
+ if (sourceFd < 0)
+ return false;
+
+ jboolean decoded = Java_WebAudioMediaCodecBridge_decodeAudioFile(
+ env,
+ context,
+ reinterpret_cast<intptr_t>(this),
+ sourceFd,
+ data_size_);
+
+ close(sourceFd);
+
+ DVLOG(1) << "decoded = " << (decoded ? "true" : "false");
+
+ return decoded;
+}
+
+void WebAudioMediaCodecBridge::InitializeDestination(
+ JNIEnv* env,
+ jobject /*java object*/,
+ jint channel_count,
+ jint sample_rate,
+ jlong duration_microsec) {
+ // Send information about this audio file: number of channels,
+ // sample rate (Hz), and the number of frames.
+ struct WebAudioMediaCodecInfo info = {
+ static_cast<unsigned long>(channel_count),
+ static_cast<unsigned long>(sample_rate),
+      // The number of frames is the duration of the file (converted from
+      // microseconds to seconds) times the sample rate, rounded to the
+      // nearest integer.
+ static_cast<unsigned long>(
+ 0.5 + (duration_microsec * 0.000001 *
+ sample_rate))
+ };
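+  // For example, a 2-second file (duration_microsec = 2000000) at 44100 Hz
+  // gives 0.5 + 2000000 * 0.000001 * 44100 = 88200.5, truncated to 88200
+  // frames.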
+
+ DVLOG(1) << "InitializeDestination:"
+ << " channel count = " << channel_count
+ << " rate = " << sample_rate
+ << " duration = " << duration_microsec << " microsec";
+
+ HANDLE_EINTR(write(pcm_output_, &info, sizeof(info)));
+}
+
+void WebAudioMediaCodecBridge::OnChunkDecoded(
+ JNIEnv* env,
+ jobject /*java object*/,
+ jobject buf,
+ jint buf_size,
+ jint input_channel_count,
+ jint output_channel_count) {
+
+ if (buf_size <= 0 || !buf)
+ return;
+
+ int8_t* buffer =
+ static_cast<int8_t*>(env->GetDirectBufferAddress(buf));
+ size_t count = static_cast<size_t>(buf_size);
+ std::vector<int16_t> decoded_data;
+
+ if (input_channel_count == 1 && output_channel_count == 2) {
+ // See crbug.com/266006. The file has one channel, but the
+ // decoder decided to return two channels. To be consistent with
+ // the number of channels in the file, only send one channel (the
+ // first).
+ int16_t* data = static_cast<int16_t*>(env->GetDirectBufferAddress(buf));
+ int frame_count = buf_size / sizeof(*data) / 2;
+
+ decoded_data.resize(frame_count);
+ for (int k = 0; k < frame_count; ++k) {
+ decoded_data[k] = *data;
+ data += 2;
+ }
+ buffer = reinterpret_cast<int8_t*>(vector_as_array(&decoded_data));
+ DCHECK(buffer);
+ count = frame_count * sizeof(*data);
+ }
+
+ // Write out the data to the pipe in small chunks if necessary.
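+  // A single write() may consume fewer bytes than requested, so each write is
+  // capped at PIPE_BUF bytes and the buffer pointer advances by the number of
+  // bytes actually written.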
+ while (count > 0) {
+ int bytes_to_write = (count >= PIPE_BUF) ? PIPE_BUF : count;
+ ssize_t bytes_written = HANDLE_EINTR(write(pcm_output_,
+ buffer,
+ bytes_to_write));
+ if (bytes_written == -1)
+ break;
+ count -= bytes_written;
+ buffer += bytes_written;
+ }
+}
+
+bool WebAudioMediaCodecBridge::RegisterWebAudioMediaCodecBridge(JNIEnv* env) {
+ return RegisterNativesImpl(env);
+}
+
+}  // namespace media
diff --git a/chromium/media/base/android/webaudio_media_codec_bridge.h b/chromium/media/base/android/webaudio_media_codec_bridge.h
new file mode 100644
index 00000000000..fda612683fc
--- /dev/null
+++ b/chromium/media/base/android/webaudio_media_codec_bridge.h
@@ -0,0 +1,80 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_ANDROID_WEBAUDIO_MEDIA_CODEC_BRIDGE_H_
+#define MEDIA_BASE_ANDROID_WEBAUDIO_MEDIA_CODEC_BRIDGE_H_
+
+#include <jni.h>
+
+#include "base/file_descriptor_posix.h"
+#include "base/memory/shared_memory.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+// This class serves as a bridge for native code to call Java
+// functions in the Android MediaCodec class. See
+// http://developer.android.com/reference/android/media/MediaCodec.html.
+class MEDIA_EXPORT WebAudioMediaCodecBridge {
+ public:
+  // Create the bridge with the given shared memory handle and file
+  // descriptor. We read the encoded audio data from |encoded_audio_handle|.
+  // Audio file information and decoded PCM samples are written to
+  // |pcm_output|.
+ // We also take ownership of |pcm_output|.
+ WebAudioMediaCodecBridge(base::SharedMemoryHandle encoded_audio_handle,
+ base::FileDescriptor pcm_output,
+ uint32_t data_size);
+ ~WebAudioMediaCodecBridge();
+
+ // Inform JNI about this bridge. Returns true if registration
+ // succeeded.
+ static bool RegisterWebAudioMediaCodecBridge(JNIEnv* env);
+
+ // Start MediaCodec to process the encoded data in
+ // |encoded_audio_handle|. The PCM samples are sent to |pcm_output|.
+ static void RunWebAudioMediaCodec(
+ base::SharedMemoryHandle encoded_audio_handle,
+ base::FileDescriptor pcm_output,
+ uint32_t data_size);
+
+ void OnChunkDecoded(JNIEnv* env,
+ jobject /*java object*/,
+ jobject buf,
+ jint buf_size,
+ jint input_channel_count,
+ jint output_channel_count);
+
+ void InitializeDestination(JNIEnv* env,
+ jobject /*java object*/,
+ jint channel_count,
+ jint sample_rate,
+ jlong duration_us);
+
+ private:
+ // Handles MediaCodec processing of the encoded data in
+  // |encoded_audio_handle_| and sends the PCM data to |pcm_output_|.
+ // Returns true if decoding was successful.
+ bool DecodeInMemoryAudioFile();
+
+ // Save encoded audio data to a temporary file and return the file
+ // descriptor to that file. -1 is returned if the audio data could
+ // not be saved for any reason.
+ int SaveEncodedAudioToFile(JNIEnv*, jobject);
+
+  // Handle for the shared memory that holds the encoded audio data; the
+  // encoded data is read via this handle.
+ base::SharedMemoryHandle encoded_audio_handle_;
+
+  // The audio file information and decoded PCM data are written to
+ // this file descriptor. We take ownership of this descriptor.
+ int pcm_output_;
+
+ // The length of the encoded data.
+ uint32_t data_size_;
+
+ DISALLOW_COPY_AND_ASSIGN(WebAudioMediaCodecBridge);
+};
+
+} // namespace media
+#endif // MEDIA_BASE_ANDROID_WEBAUDIO_MEDIA_CODEC_BRIDGE_H_
diff --git a/chromium/media/base/android/webaudio_media_codec_info.h b/chromium/media/base/android/webaudio_media_codec_info.h
new file mode 100644
index 00000000000..423af91e8cb
--- /dev/null
+++ b/chromium/media/base/android/webaudio_media_codec_info.h
@@ -0,0 +1,20 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_ANDROID_WEBAUDIO_MEDIA_CODEC_INFO_H_
+#define MEDIA_BASE_ANDROID_WEBAUDIO_MEDIA_CODEC_INFO_H_
+
+namespace media {
+
+// This structure holds the information about the audio file
+// determined by MediaCodec that is needed by the audio decoder to
+// create the necessary destination bus.
+struct WebAudioMediaCodecInfo {
+ unsigned long channel_count;
+ unsigned long sample_rate;
+ unsigned long number_of_frames;
+};
+
+} // namespace media
+#endif // MEDIA_BASE_ANDROID_WEBAUDIO_MEDIA_CODEC_INFO_H_
diff --git a/chromium/media/base/audio_buffer.cc b/chromium/media/base/audio_buffer.cc
new file mode 100644
index 00000000000..b2cdd8c41a7
--- /dev/null
+++ b/chromium/media/base/audio_buffer.cc
@@ -0,0 +1,254 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/audio_buffer.h"
+
+#include "base/logging.h"
+#include "media/base/audio_bus.h"
+#include "media/base/buffers.h"
+#include "media/base/limits.h"
+
+namespace media {
+
+// Alignment of each channel's data; this must match what ffmpeg expects
+// (which may be 0, 16, or 32, depending on the processor). Selecting 32 in
+// order to work on all processors.
+enum { kChannelAlignment = 32 };
+
+AudioBuffer::AudioBuffer(SampleFormat sample_format,
+ int channel_count,
+ int frame_count,
+ bool create_buffer,
+ const uint8* const* data,
+ const base::TimeDelta timestamp,
+ const base::TimeDelta duration)
+ : sample_format_(sample_format),
+ channel_count_(channel_count),
+ adjusted_frame_count_(frame_count),
+ trim_start_(0),
+ end_of_stream_(!create_buffer && data == NULL && frame_count == 0),
+ timestamp_(timestamp),
+ duration_(duration) {
+ CHECK_GE(channel_count, 0);
+ CHECK_LE(channel_count, limits::kMaxChannels);
+ CHECK_GE(frame_count, 0);
+ int bytes_per_channel = SampleFormatToBytesPerChannel(sample_format);
+ DCHECK_LE(bytes_per_channel, kChannelAlignment);
+ int data_size = frame_count * bytes_per_channel;
+
+ // Empty buffer?
+ if (!create_buffer)
+ return;
+
+ if (sample_format == kSampleFormatPlanarF32 ||
+ sample_format == kSampleFormatPlanarS16) {
+ // Planar data, so need to allocate buffer for each channel.
+ // Determine per channel data size, taking into account alignment.
+ int block_size_per_channel =
+ (data_size + kChannelAlignment - 1) & ~(kChannelAlignment - 1);
+ DCHECK_GE(block_size_per_channel, data_size);
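+    // For example, with kSampleFormatPlanarS16 (2 bytes per sample) and 1000
+    // frames, |data_size| is 2000 bytes and |block_size_per_channel| rounds
+    // up to 2016, the next multiple of kChannelAlignment.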
+
+ // Allocate a contiguous buffer for all the channel data.
+ data_.reset(static_cast<uint8*>(base::AlignedAlloc(
+ channel_count * block_size_per_channel, kChannelAlignment)));
+ channel_data_.reserve(channel_count);
+
+ // Copy each channel's data into the appropriate spot.
+ for (int i = 0; i < channel_count; ++i) {
+ channel_data_.push_back(data_.get() + i * block_size_per_channel);
+ if (data)
+ memcpy(channel_data_[i], data[i], data_size);
+ }
+ return;
+ }
+
+ // Remaining formats are interleaved data.
+ DCHECK(sample_format_ == kSampleFormatU8 ||
+ sample_format_ == kSampleFormatS16 ||
+ sample_format_ == kSampleFormatS32 ||
+ sample_format_ == kSampleFormatF32) << sample_format_;
+ // Allocate our own buffer and copy the supplied data into it. Buffer must
+ // contain the data for all channels.
+ data_size *= channel_count;
+ data_.reset(
+ static_cast<uint8*>(base::AlignedAlloc(data_size, kChannelAlignment)));
+ if (data)
+ memcpy(data_.get(), data[0], data_size);
+}
+
+AudioBuffer::~AudioBuffer() {}
+
+// static
+scoped_refptr<AudioBuffer> AudioBuffer::CopyFrom(
+ SampleFormat sample_format,
+ int channel_count,
+ int frame_count,
+ const uint8* const* data,
+ const base::TimeDelta timestamp,
+ const base::TimeDelta duration) {
+ // If you hit this CHECK you likely have a bug in a demuxer. Go fix it.
+ CHECK_GT(frame_count, 0); // Otherwise looks like an EOF buffer.
+ CHECK(data[0]);
+ return make_scoped_refptr(new AudioBuffer(sample_format,
+ channel_count,
+ frame_count,
+ true,
+ data,
+ timestamp,
+ duration));
+}
+
+// static
+scoped_refptr<AudioBuffer> AudioBuffer::CreateBuffer(SampleFormat sample_format,
+ int channel_count,
+ int frame_count) {
+ CHECK_GT(frame_count, 0); // Otherwise looks like an EOF buffer.
+ return make_scoped_refptr(new AudioBuffer(sample_format,
+ channel_count,
+ frame_count,
+ true,
+ NULL,
+ kNoTimestamp(),
+ kNoTimestamp()));
+}
+
+// static
+scoped_refptr<AudioBuffer> AudioBuffer::CreateEmptyBuffer(
+ int channel_count,
+ int frame_count,
+ const base::TimeDelta timestamp,
+ const base::TimeDelta duration) {
+ CHECK_GT(frame_count, 0); // Otherwise looks like an EOF buffer.
+ // Since data == NULL, format doesn't matter.
+ return make_scoped_refptr(new AudioBuffer(kSampleFormatF32,
+ channel_count,
+ frame_count,
+ false,
+ NULL,
+ timestamp,
+ duration));
+}
+
+// static
+scoped_refptr<AudioBuffer> AudioBuffer::CreateEOSBuffer() {
+ return make_scoped_refptr(new AudioBuffer(
+ kUnknownSampleFormat, 1, 0, false, NULL, kNoTimestamp(), kNoTimestamp()));
+}
+
+// Convert int16 values in the range [kint16min, kint16max] to [-1.0, 1.0].
+static inline float ConvertS16ToFloat(int16 value) {
+ return value * (value < 0 ? -1.0f / kint16min : 1.0f / kint16max);
+}
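+
+// For example, ConvertS16ToFloat(kint16min) scales by -1.0f / kint16min and
+// yields exactly -1.0f, while ConvertS16ToFloat(kint16max) yields 1.0f, so
+// the full int16 range maps onto [-1.0, 1.0] without clipping.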
+
+void AudioBuffer::ReadFrames(int frames_to_copy,
+ int source_frame_offset,
+ int dest_frame_offset,
+ AudioBus* dest) {
+ // Deinterleave each channel (if necessary) and convert to 32bit
+ // floating-point with nominal range -1.0 -> +1.0 (if necessary).
+
+ // |dest| must have the same number of channels, and the number of frames
+ // specified must be in range.
+ DCHECK(!end_of_stream());
+ DCHECK_EQ(dest->channels(), channel_count_);
+ DCHECK_LE(source_frame_offset + frames_to_copy, adjusted_frame_count_);
+ DCHECK_LE(dest_frame_offset + frames_to_copy, dest->frames());
+
+ // Move the start past any frames that have been trimmed.
+ source_frame_offset += trim_start_;
+
+ if (!data_) {
+ // Special case for an empty buffer.
+ dest->ZeroFramesPartial(dest_frame_offset, frames_to_copy);
+ return;
+ }
+
+ if (sample_format_ == kSampleFormatPlanarF32) {
+ // Format is planar float32. Copy the data from each channel as a block.
+ for (int ch = 0; ch < channel_count_; ++ch) {
+ const float* source_data =
+ reinterpret_cast<const float*>(channel_data_[ch]) +
+ source_frame_offset;
+ memcpy(dest->channel(ch) + dest_frame_offset,
+ source_data,
+ sizeof(float) * frames_to_copy);
+ }
+ return;
+ }
+
+ if (sample_format_ == kSampleFormatPlanarS16) {
+ // Format is planar signed16. Convert each value into float and insert into
+ // output channel data.
+ for (int ch = 0; ch < channel_count_; ++ch) {
+ const int16* source_data =
+ reinterpret_cast<const int16*>(channel_data_[ch]) +
+ source_frame_offset;
+ float* dest_data = dest->channel(ch) + dest_frame_offset;
+ for (int i = 0; i < frames_to_copy; ++i) {
+ dest_data[i] = ConvertS16ToFloat(source_data[i]);
+ }
+ }
+ return;
+ }
+
+ if (sample_format_ == kSampleFormatF32) {
+ // Format is interleaved float32. Copy the data into each channel.
+ const float* source_data = reinterpret_cast<const float*>(data_.get()) +
+ source_frame_offset * channel_count_;
+ for (int ch = 0; ch < channel_count_; ++ch) {
+ float* dest_data = dest->channel(ch) + dest_frame_offset;
+ for (int i = 0, offset = ch; i < frames_to_copy;
+ ++i, offset += channel_count_) {
+ dest_data[i] = source_data[offset];
+ }
+ }
+ return;
+ }
+
+ // Remaining formats are integer interleaved data. Use the deinterleaving code
+ // in AudioBus to copy the data.
+ DCHECK(sample_format_ == kSampleFormatU8 ||
+ sample_format_ == kSampleFormatS16 ||
+ sample_format_ == kSampleFormatS32);
+ int bytes_per_channel = SampleFormatToBytesPerChannel(sample_format_);
+ int frame_size = channel_count_ * bytes_per_channel;
+ const uint8* source_data = data_.get() + source_frame_offset * frame_size;
+ dest->FromInterleavedPartial(
+ source_data, dest_frame_offset, frames_to_copy, bytes_per_channel);
+}
+
+void AudioBuffer::TrimStart(int frames_to_trim) {
+ CHECK_GE(frames_to_trim, 0);
+ CHECK_LE(frames_to_trim, adjusted_frame_count_);
+
+ // Adjust timestamp_ and duration_ to reflect the smaller number of frames.
+ double offset = static_cast<double>(duration_.InMicroseconds()) *
+ frames_to_trim / adjusted_frame_count_;
+ base::TimeDelta offset_as_time =
+ base::TimeDelta::FromMicroseconds(static_cast<int64>(offset));
+ timestamp_ += offset_as_time;
+ duration_ -= offset_as_time;
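+
+  // For example, trimming 10 frames from a 100-frame buffer that lasts 100
+  // seconds moves timestamp_ forward by 10 seconds and shrinks duration_ by
+  // the same amount.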
+
+ // Finally adjust the number of frames in this buffer and where the start
+ // really is.
+ adjusted_frame_count_ -= frames_to_trim;
+ trim_start_ += frames_to_trim;
+}
+
+void AudioBuffer::TrimEnd(int frames_to_trim) {
+ CHECK_GE(frames_to_trim, 0);
+ CHECK_LE(frames_to_trim, adjusted_frame_count_);
+
+ // Adjust duration_ only to reflect the smaller number of frames.
+ double offset = static_cast<double>(duration_.InMicroseconds()) *
+ frames_to_trim / adjusted_frame_count_;
+ base::TimeDelta offset_as_time =
+ base::TimeDelta::FromMicroseconds(static_cast<int64>(offset));
+ duration_ -= offset_as_time;
+
+ // Finally adjust the number of frames in this buffer.
+ adjusted_frame_count_ -= frames_to_trim;
+}
+
+} // namespace media
diff --git a/chromium/media/base/audio_buffer.h b/chromium/media/base/audio_buffer.h
new file mode 100644
index 00000000000..e52355ac4c2
--- /dev/null
+++ b/chromium/media/base/audio_buffer.h
@@ -0,0 +1,138 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_AUDIO_BUFFER_H_
+#define MEDIA_BASE_AUDIO_BUFFER_H_
+
+#include <vector>
+
+#include "base/memory/aligned_memory.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/time/time.h"
+#include "media/base/media_export.h"
+#include "media/base/sample_format.h"
+
+namespace media {
+class AudioBus;
+
+// An audio buffer that takes a copy of the data passed to it, holds it, and
+// copies it into an AudioBus when needed. Also supports an end of stream
+// marker.
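+//
+// A minimal usage sketch (|pcm| is a hypothetical array of interleaved int16
+// samples; the timestamp and duration would come from a demuxer in practice):
+//
+//   const uint8* const data[] = { reinterpret_cast<const uint8*>(pcm) };
+//   scoped_refptr<AudioBuffer> buffer = AudioBuffer::CopyFrom(
+//       kSampleFormatS16, 2, 441, data, timestamp, duration);
+//   scoped_ptr<AudioBus> bus = AudioBus::Create(2, 441);
+//   buffer->ReadFrames(441, 0, 0, bus.get());  // |bus| now holds float data.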
+class MEDIA_EXPORT AudioBuffer
+ : public base::RefCountedThreadSafe<AudioBuffer> {
+ public:
+ // Create an AudioBuffer whose channel data is copied from |data|. For
+ // interleaved data, only the first buffer is used. For planar data, the
+ // number of buffers must be equal to |channel_count|. |frame_count| is the
+  // number of frames in each buffer. |data| must not be null and
+  // |frame_count| must be > 0.
+ //
+ // TODO(jrummell): Compute duration rather than pass it in.
+ static scoped_refptr<AudioBuffer> CopyFrom(SampleFormat sample_format,
+ int channel_count,
+ int frame_count,
+ const uint8* const* data,
+ const base::TimeDelta timestamp,
+ const base::TimeDelta duration);
+
+ // Create an AudioBuffer with |frame_count| frames. Buffer is allocated, but
+ // not initialized. Timestamp and duration are set to kNoTimestamp().
+ static scoped_refptr<AudioBuffer> CreateBuffer(SampleFormat sample_format,
+ int channel_count,
+ int frame_count);
+
+ // Create an empty AudioBuffer with |frame_count| frames.
+ static scoped_refptr<AudioBuffer> CreateEmptyBuffer(
+ int channel_count,
+ int frame_count,
+ const base::TimeDelta timestamp,
+ const base::TimeDelta duration);
+
+  // Create an AudioBuffer indicating we've reached end of stream.
+ // Calling any method other than end_of_stream() on the resulting buffer
+ // is disallowed.
+ static scoped_refptr<AudioBuffer> CreateEOSBuffer();
+
+ // Copy frames into |dest|. |frames_to_copy| is the number of frames to copy.
+ // |source_frame_offset| specifies how many frames in the buffer to skip
+ // first. |dest_frame_offset| is the frame offset in |dest|. The frames are
+ // converted from their source format into planar float32 data (which is all
+ // that AudioBus handles).
+ void ReadFrames(int frames_to_copy,
+ int source_frame_offset,
+ int dest_frame_offset,
+ AudioBus* dest);
+
+ // Trim an AudioBuffer by removing |frames_to_trim| frames from the start.
+ // Timestamp and duration are adjusted to reflect the fewer frames.
+ // Note that repeated calls to TrimStart() may result in timestamp() and
+ // duration() being off by a few microseconds due to rounding issues.
+ void TrimStart(int frames_to_trim);
+
+ // Trim an AudioBuffer by removing |frames_to_trim| frames from the end.
+ // Duration is adjusted to reflect the fewer frames.
+ void TrimEnd(int frames_to_trim);
+
+ // Return the number of channels.
+ int channel_count() const { return channel_count_; }
+
+ // Return the number of frames held.
+ int frame_count() const { return adjusted_frame_count_; }
+
+ // Access to constructor parameters.
+ base::TimeDelta timestamp() const { return timestamp_; }
+ base::TimeDelta duration() const { return duration_; }
+
+ // TODO(jrummell): Remove set_timestamp() and set_duration() once
+ // DecryptingAudioDecoder::EnqueueFrames() is changed to set them when
+ // creating the buffer. See http://crbug.com/255261.
+ void set_timestamp(base::TimeDelta timestamp) { timestamp_ = timestamp; }
+ void set_duration(base::TimeDelta duration) { duration_ = duration; }
+
+ // If there's no data in this buffer, it represents end of stream.
+ bool end_of_stream() const { return end_of_stream_; }
+
+  // Access to the raw buffer for ffmpeg to write directly to. For planar
+  // formats, the data is grouped by channel.
+ uint8* writable_data() { return data_.get(); }
+
+ private:
+ friend class base::RefCountedThreadSafe<AudioBuffer>;
+
+ // Allocates aligned contiguous buffer to hold all channel data (1 block for
+ // interleaved data, |channel_count| blocks for planar data), copies
+ // [data,data+data_size) to the allocated buffer(s). If |data| is null, no
+ // data is copied. If |create_buffer| is false, no data buffer is created (or
+ // copied to).
+ AudioBuffer(SampleFormat sample_format,
+ int channel_count,
+ int frame_count,
+ bool create_buffer,
+ const uint8* const* data,
+ const base::TimeDelta timestamp,
+ const base::TimeDelta duration);
+
+ virtual ~AudioBuffer();
+
+ const SampleFormat sample_format_;
+ const int channel_count_;
+ int adjusted_frame_count_;
+ int trim_start_;
+ const bool end_of_stream_;
+ base::TimeDelta timestamp_;
+ base::TimeDelta duration_;
+
+ // Contiguous block of channel data.
+ scoped_ptr_malloc<uint8, base::ScopedPtrAlignedFree> data_;
+
+  // For planar data, points to each channel's data.
+ std::vector<uint8*> channel_data_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(AudioBuffer);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_AUDIO_BUFFER_H_
diff --git a/chromium/media/base/audio_buffer_queue.cc b/chromium/media/base/audio_buffer_queue.cc
new file mode 100644
index 00000000000..abe8fcef4eb
--- /dev/null
+++ b/chromium/media/base/audio_buffer_queue.cc
@@ -0,0 +1,163 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/audio_buffer_queue.h"
+
+#include <algorithm>
+
+#include "base/logging.h"
+#include "media/base/audio_bus.h"
+#include "media/base/buffers.h"
+
+namespace media {
+
+AudioBufferQueue::AudioBufferQueue() { Clear(); }
+AudioBufferQueue::~AudioBufferQueue() {}
+
+void AudioBufferQueue::Clear() {
+ buffers_.clear();
+ current_buffer_ = buffers_.begin();
+ current_buffer_offset_ = 0;
+ frames_ = 0;
+ current_time_ = kNoTimestamp();
+}
+
+void AudioBufferQueue::Append(const scoped_refptr<AudioBuffer>& buffer_in) {
+ // If we have just written the first buffer, update |current_time_| to be the
+ // start time.
+ if (buffers_.empty() && buffer_in->timestamp() != kNoTimestamp()) {
+ current_time_ = buffer_in->timestamp();
+ }
+
+ // Add the buffer to the queue. Inserting into deque invalidates all
+ // iterators, so point to the first buffer.
+ buffers_.push_back(buffer_in);
+ current_buffer_ = buffers_.begin();
+
+ // Update the |frames_| counter since we have added frames.
+ frames_ += buffer_in->frame_count();
+ CHECK_GT(frames_, 0); // make sure it doesn't overflow.
+}
+
+int AudioBufferQueue::ReadFrames(int frames,
+ int dest_frame_offset,
+ AudioBus* dest) {
+ DCHECK_GE(dest->frames(), frames + dest_frame_offset);
+ return InternalRead(frames, true, 0, dest_frame_offset, dest);
+}
+
+int AudioBufferQueue::PeekFrames(int frames,
+ int source_frame_offset,
+ int dest_frame_offset,
+ AudioBus* dest) {
+ DCHECK_GE(dest->frames(), frames);
+ return InternalRead(
+ frames, false, source_frame_offset, dest_frame_offset, dest);
+}
+
+void AudioBufferQueue::SeekFrames(int frames) {
+  // Perform the seek only if we have enough frames in the queue.
+ CHECK_LE(frames, frames_);
+ int taken = InternalRead(frames, true, 0, 0, NULL);
+ DCHECK_EQ(taken, frames);
+}
+
+int AudioBufferQueue::InternalRead(int frames,
+ bool advance_position,
+ int source_frame_offset,
+ int dest_frame_offset,
+ AudioBus* dest) {
+ // Counts how many frames are actually read from the buffer queue.
+ int taken = 0;
+ BufferQueue::iterator current_buffer = current_buffer_;
+ int current_buffer_offset = current_buffer_offset_;
+
+ int frames_to_skip = source_frame_offset;
+ while (taken < frames) {
+    // |current_buffer| is valid once the first buffer has been appended.
+    // Make sure there is still data to be processed.
+ if (current_buffer == buffers_.end())
+ break;
+
+ scoped_refptr<AudioBuffer> buffer = *current_buffer;
+
+ int remaining_frames_in_buffer =
+ buffer->frame_count() - current_buffer_offset;
+
+ if (frames_to_skip > 0) {
+ // If there are frames to skip, do it first. May need to skip into
+ // subsequent buffers.
+ int skipped = std::min(remaining_frames_in_buffer, frames_to_skip);
+ current_buffer_offset += skipped;
+ frames_to_skip -= skipped;
+ } else {
+      // Determine how much to copy from the current buffer: no more than
+      // |frames| frames in total, and no more than the frames remaining in
+      // the current buffer.
+ int copied = std::min(frames - taken, remaining_frames_in_buffer);
+
+      // If |dest| is NULL, there's no need to copy.
+ if (dest) {
+ buffer->ReadFrames(
+ copied, current_buffer_offset, dest_frame_offset + taken, dest);
+ }
+
+      // Increase the total number of frames copied, which determines when
+      // this loop ends.
+ taken += copied;
+
+ // We have read |copied| frames from the current buffer. Advance the
+ // offset.
+ current_buffer_offset += copied;
+ }
+
+    // Has the buffer been consumed?
+ if (current_buffer_offset == buffer->frame_count()) {
+ if (advance_position) {
+        // The next buffer may not have a timestamp, so update the current
+        // timestamp before switching to the next buffer.
+ UpdateCurrentTime(current_buffer, current_buffer_offset);
+ }
+
+ // If we are at the last buffer, no more data to be copied, so stop.
+ BufferQueue::iterator next = current_buffer + 1;
+ if (next == buffers_.end())
+ break;
+
+ // Advances the iterator.
+ current_buffer = next;
+ current_buffer_offset = 0;
+ }
+ }
+
+ if (advance_position) {
+ // Update the appropriate values since |taken| frames have been copied out.
+ frames_ -= taken;
+ DCHECK_GE(frames_, 0);
+ DCHECK(current_buffer_ != buffers_.end() || frames_ == 0);
+
+ UpdateCurrentTime(current_buffer, current_buffer_offset);
+
+ // Remove any buffers before the current buffer as there is no going
+ // backwards.
+ buffers_.erase(buffers_.begin(), current_buffer);
+ current_buffer_ = buffers_.begin();
+ current_buffer_offset_ = current_buffer_offset;
+ }
+
+ return taken;
+}
+
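+// For example, a buffer with timestamp 10s, duration 1s, and 100 frames
+// yields current_time_ = 10.25s at |offset| == 25; the + 0.5 below rounds
+// the computed microsecond offset to nearest rather than truncating.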
+void AudioBufferQueue::UpdateCurrentTime(BufferQueue::iterator buffer,
+ int offset) {
+ if (buffer != buffers_.end() && (*buffer)->timestamp() != kNoTimestamp()) {
+ double time_offset = ((*buffer)->duration().InMicroseconds() * offset) /
+ static_cast<double>((*buffer)->frame_count());
+ current_time_ =
+ (*buffer)->timestamp() + base::TimeDelta::FromMicroseconds(
+ static_cast<int64>(time_offset + 0.5));
+ }
+}
+
+} // namespace media
diff --git a/chromium/media/base/audio_buffer_queue.h b/chromium/media/base/audio_buffer_queue.h
new file mode 100644
index 00000000000..58823f2646b
--- /dev/null
+++ b/chromium/media/base/audio_buffer_queue.h
@@ -0,0 +1,104 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_AUDIO_BUFFER_QUEUE_H_
+#define MEDIA_BASE_AUDIO_BUFFER_QUEUE_H_
+
+#include <deque>
+
+#include "base/basictypes.h"
+#include "base/memory/scoped_ptr.h"
+#include "media/base/audio_buffer.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+class AudioBus;
+
+// A queue of AudioBuffers to support reading of arbitrary chunks of a media
+// data source. Audio data can be copied into an AudioBus for output. The
+// current position can be moved forward to anywhere in the buffered data.
+//
+// This class is not inherently thread-safe. Concurrent access must be
+// externally serialized.
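+//
+// A minimal usage sketch (|buffer1| and |buffer2| are hypothetical
+// AudioBuffers; |bus| is an AudioBus with at least 256 frames):
+//
+//   AudioBufferQueue queue;
+//   queue.Append(buffer1);
+//   queue.Append(buffer2);
+//   int read = queue.ReadFrames(256, 0, bus.get());  // Advances position.
+//   queue.SeekFrames(queue.frames());  // Skip everything that remains.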
+class MEDIA_EXPORT AudioBufferQueue {
+ public:
+ AudioBufferQueue();
+ ~AudioBufferQueue();
+
+ // Clears the buffer queue.
+ void Clear();
+
+ // Appends |buffer_in| to this queue.
+ void Append(const scoped_refptr<AudioBuffer>& buffer_in);
+
+ // Reads a maximum of |frames| frames into |dest| from the current position.
+ // Returns the number of frames read. The current position will advance by the
+ // amount of frames read. |dest_frame_offset| specifies a starting offset into
+ // |dest|. On each call, the frames are converted from their source format
+ // into the destination AudioBus.
+ int ReadFrames(int frames, int dest_frame_offset, AudioBus* dest);
+
+ // Copies up to |frames| frames from current position to |dest|. Returns
+ // number of frames copied. Doesn't advance current position. Starts at
+ // |source_frame_offset| from current position. |dest_frame_offset| specifies
+ // a starting offset into |dest|. On each call, the frames are converted from
+ // their source format into the destination AudioBus.
+ int PeekFrames(int frames,
+ int source_frame_offset,
+ int dest_frame_offset,
+ AudioBus* dest);
+
+ // Moves the current position forward by |frames| frames. If |frames| exceeds
+ // frames available, the seek operation will fail.
+ void SeekFrames(int frames);
+
+ // Returns the number of frames buffered beyond the current position.
+ int frames() const { return frames_; }
+
+  // Returns the current timestamp, taking the current offset into account.
+  // The value is calculated from the timestamp of the current buffer. If the
+  // current buffer has no timestamp set, returns the value that corresponds
+  // to the last position in a buffer that did have one. kNoTimestamp() is
+  // returned if none of the buffers read from had a timestamp set.
+ base::TimeDelta current_time() const { return current_time_; }
+
+ private:
+ // Definition of the buffer queue.
+ typedef std::deque<scoped_refptr<AudioBuffer> > BufferQueue;
+
+ // An internal method shared by ReadFrames() and SeekFrames() that actually
+ // does reading. It reads a maximum of |frames| frames into |dest|. Returns
+ // the number of frames read. The current position will be moved forward by
+ // the number of frames read if |advance_position| is set. If |dest| is NULL,
+ // only the current position will advance but no data will be copied.
+ // |source_frame_offset| can be used to skip frames before reading.
+ // |dest_frame_offset| specifies a starting offset into |dest|.
+ int InternalRead(int frames,
+ bool advance_position,
+ int source_frame_offset,
+ int dest_frame_offset,
+ AudioBus* dest);
+
+ // Updates |current_time_| with the time that corresponds to the specified
+ // position in the buffer.
+ void UpdateCurrentTime(BufferQueue::iterator buffer, int offset);
+
+ BufferQueue::iterator current_buffer_;
+ BufferQueue buffers_;
+ int current_buffer_offset_;
+
+ // Number of frames available to be read in the buffer.
+ int frames_;
+
+  // Keeps track of the most recent time we've seen in case |buffers_| is
+  // empty when our owner asks what time it is.
+ base::TimeDelta current_time_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioBufferQueue);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_AUDIO_BUFFER_QUEUE_H_
diff --git a/chromium/media/base/audio_buffer_queue_unittest.cc b/chromium/media/base/audio_buffer_queue_unittest.cc
new file mode 100644
index 00000000000..b95bdca1454
--- /dev/null
+++ b/chromium/media/base/audio_buffer_queue_unittest.cc
@@ -0,0 +1,467 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/basictypes.h"
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/strings/stringprintf.h"
+#include "base/time/time.h"
+#include "media/base/audio_buffer.h"
+#include "media/base/audio_buffer_queue.h"
+#include "media/base/audio_bus.h"
+#include "media/base/buffers.h"
+#include "media/base/test_helpers.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+static void VerifyResult(float* channel_data,
+ int frames,
+ float start,
+ float increment) {
+ for (int i = 0; i < frames; ++i) {
+ SCOPED_TRACE(base::StringPrintf(
+ "i=%d/%d start=%f, increment=%f", i, frames, start, increment));
+ ASSERT_EQ(start, channel_data[i]);
+ start += increment;
+ }
+}
+
+TEST(AudioBufferQueueTest, AppendAndClear) {
+ const int channels = 1;
+ const int frames = 8;
+ const base::TimeDelta kNoTime = kNoTimestamp();
+ AudioBufferQueue buffer;
+ EXPECT_EQ(0, buffer.frames());
+ buffer.Append(MakeInterleavedAudioBuffer<uint8>(
+ kSampleFormatU8, channels, 10, 1, frames, kNoTime, kNoTime));
+ EXPECT_EQ(frames, buffer.frames());
+ buffer.Clear();
+ EXPECT_EQ(0, buffer.frames());
+ buffer.Append(MakeInterleavedAudioBuffer<uint8>(
+ kSampleFormatU8, channels, 20, 1, frames, kNoTime, kNoTime));
+ EXPECT_EQ(frames, buffer.frames());
+}
+
+TEST(AudioBufferQueueTest, MultipleAppend) {
+ const int channels = 1;
+ const int frames = 8;
+ const base::TimeDelta kNoTime = kNoTimestamp();
+ AudioBufferQueue buffer;
+
+ // Append 40 frames in 5 buffers.
+ buffer.Append(MakeInterleavedAudioBuffer<uint8>(
+ kSampleFormatU8, channels, 10, 1, frames, kNoTime, kNoTime));
+ EXPECT_EQ(8, buffer.frames());
+ buffer.Append(MakeInterleavedAudioBuffer<uint8>(
+ kSampleFormatU8, channels, 10, 1, frames, kNoTime, kNoTime));
+ EXPECT_EQ(16, buffer.frames());
+ buffer.Append(MakeInterleavedAudioBuffer<uint8>(
+ kSampleFormatU8, channels, 10, 1, frames, kNoTime, kNoTime));
+ EXPECT_EQ(24, buffer.frames());
+ buffer.Append(MakeInterleavedAudioBuffer<uint8>(
+ kSampleFormatU8, channels, 10, 1, frames, kNoTime, kNoTime));
+ EXPECT_EQ(32, buffer.frames());
+ buffer.Append(MakeInterleavedAudioBuffer<uint8>(
+ kSampleFormatU8, channels, 10, 1, frames, kNoTime, kNoTime));
+ EXPECT_EQ(40, buffer.frames());
+}
+
+TEST(AudioBufferQueueTest, IteratorCheck) {
+ const int channels = 1;
+ const int frames = 8;
+ const base::TimeDelta kNoTime = kNoTimestamp();
+ AudioBufferQueue buffer;
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
+
+  // Append 40 frames in 5 buffers. Intersperse ReadFrames() to make sure the
+  // iterator is pointing to the correct position.
+ buffer.Append(MakeInterleavedAudioBuffer<float>(
+ kSampleFormatF32, channels, 10.0f, 1.0f, frames, kNoTime, kNoTime));
+ EXPECT_EQ(8, buffer.frames());
+
+ EXPECT_EQ(4, buffer.ReadFrames(4, 0, bus.get()));
+ EXPECT_EQ(4, buffer.frames());
+ VerifyResult(bus->channel(0), 4, 10.0f, 1.0f);
+
+ buffer.Append(MakeInterleavedAudioBuffer<float>(
+ kSampleFormatF32, channels, 20.0f, 1.0f, frames, kNoTime, kNoTime));
+ EXPECT_EQ(12, buffer.frames());
+ buffer.Append(MakeInterleavedAudioBuffer<float>(
+ kSampleFormatF32, channels, 30.0f, 1.0f, frames, kNoTime, kNoTime));
+ EXPECT_EQ(20, buffer.frames());
+
+ buffer.SeekFrames(16);
+ EXPECT_EQ(4, buffer.ReadFrames(4, 0, bus.get()));
+ EXPECT_EQ(0, buffer.frames());
+ VerifyResult(bus->channel(0), 4, 34.0f, 1.0f);
+
+ buffer.Append(MakeInterleavedAudioBuffer<float>(
+ kSampleFormatF32, channels, 40.0f, 1.0f, frames, kNoTime, kNoTime));
+ EXPECT_EQ(8, buffer.frames());
+ buffer.Append(MakeInterleavedAudioBuffer<float>(
+ kSampleFormatF32, channels, 50.0f, 1.0f, frames, kNoTime, kNoTime));
+ EXPECT_EQ(16, buffer.frames());
+
+ EXPECT_EQ(4, buffer.ReadFrames(4, 0, bus.get()));
+ VerifyResult(bus->channel(0), 4, 40.0f, 1.0f);
+
+ // Read off the end of the buffer.
+ EXPECT_EQ(12, buffer.frames());
+ buffer.SeekFrames(8);
+ EXPECT_EQ(4, buffer.ReadFrames(100, 0, bus.get()));
+ VerifyResult(bus->channel(0), 4, 54.0f, 1.0f);
+}
+
+TEST(AudioBufferQueueTest, Seek) {
+ const int channels = 2;
+ const int frames = 6;
+ const base::TimeDelta kNoTime = kNoTimestamp();
+ AudioBufferQueue buffer;
+
+ // Add 6 frames of data.
+ buffer.Append(MakeInterleavedAudioBuffer<float>(
+ kSampleFormatF32, channels, 1.0f, 1.0f, frames, kNoTime, kNoTime));
+ EXPECT_EQ(6, buffer.frames());
+
+ // Seek past 2 frames.
+ buffer.SeekFrames(2);
+ EXPECT_EQ(4, buffer.frames());
+
+ // Seek to end of data.
+ buffer.SeekFrames(4);
+ EXPECT_EQ(0, buffer.frames());
+
+  // At the end of the data, seeking fails unless 0 frames are specified.
+ buffer.SeekFrames(0);
+}
+
+TEST(AudioBufferQueueTest, ReadF32) {
+ const int channels = 2;
+ const base::TimeDelta kNoTime = kNoTimestamp();
+ AudioBufferQueue buffer;
+
+ // Add 76 frames of data.
+ buffer.Append(MakeInterleavedAudioBuffer<float>(
+ kSampleFormatF32, channels, 1.0f, 1.0f, 6, kNoTime, kNoTime));
+ buffer.Append(MakeInterleavedAudioBuffer<float>(
+ kSampleFormatF32, channels, 13.0f, 1.0f, 10, kNoTime, kNoTime));
+ buffer.Append(MakeInterleavedAudioBuffer<float>(
+ kSampleFormatF32, channels, 33.0f, 1.0f, 60, kNoTime, kNoTime));
+ EXPECT_EQ(76, buffer.frames());
+
+ // Read 3 frames from the buffer. F32 is interleaved, so ch[0] should be
+ // 1, 3, 5, and ch[1] should be 2, 4, 6.
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
+ EXPECT_EQ(3, buffer.ReadFrames(3, 0, bus.get()));
+ EXPECT_EQ(73, buffer.frames());
+ VerifyResult(bus->channel(0), 3, 1.0f, 2.0f);
+ VerifyResult(bus->channel(1), 3, 2.0f, 2.0f);
+
+  // Now read 5 frames, which will span buffers. Append the data into the
+  // AudioBus.
+ EXPECT_EQ(5, buffer.ReadFrames(5, 3, bus.get()));
+ EXPECT_EQ(68, buffer.frames());
+ VerifyResult(bus->channel(0), 8, 1.0f, 2.0f);
+ VerifyResult(bus->channel(1), 8, 2.0f, 2.0f);
+
+ // Now skip into the third buffer.
+ buffer.SeekFrames(20);
+ EXPECT_EQ(48, buffer.frames());
+
+ // Now read 2 frames, which are in the third buffer.
+ EXPECT_EQ(2, buffer.ReadFrames(2, 0, bus.get()));
+ VerifyResult(bus->channel(0), 2, 57.0f, 2.0f);
+ VerifyResult(bus->channel(1), 2, 58.0f, 2.0f);
+}
+
+TEST(AudioBufferQueueTest, ReadU8) {
+ const int channels = 4;
+ const int frames = 4;
+ const base::TimeDelta kNoTime = kNoTimestamp();
+ AudioBufferQueue buffer;
+
+ // Add 4 frames of data.
+ buffer.Append(MakeInterleavedAudioBuffer<uint8>(
+ kSampleFormatU8, channels, 128, 1, frames, kNoTime, kNoTime));
+
+ // Read all 4 frames from the buffer. Data is interleaved, so ch[0] should be
+ // 128, 132, 136, 140, other channels similar. However, values are converted
+ // from [0, 255] to [-1.0, 1.0] with a bias of 128. Thus the first buffer
+ // value should be 0.0, then 1/127, 2/127, etc.
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
+ EXPECT_EQ(4, buffer.ReadFrames(4, 0, bus.get()));
+ EXPECT_EQ(0, buffer.frames());
+ VerifyResult(bus->channel(0), 4, 0.0f, 4.0f / 127.0f);
+ VerifyResult(bus->channel(1), 4, 1.0f / 127.0f, 4.0f / 127.0f);
+ VerifyResult(bus->channel(2), 4, 2.0f / 127.0f, 4.0f / 127.0f);
+ VerifyResult(bus->channel(3), 4, 3.0f / 127.0f, 4.0f / 127.0f);
+}
+
+TEST(AudioBufferQueueTest, ReadS16) {
+ const int channels = 2;
+ const base::TimeDelta kNoTime = kNoTimestamp();
+ AudioBufferQueue buffer;
+
+ // Add 24 frames of data.
+ buffer.Append(MakeInterleavedAudioBuffer<int16>(
+ kSampleFormatS16, channels, 1, 1, 4, kNoTime, kNoTime));
+ buffer.Append(MakeInterleavedAudioBuffer<int16>(
+ kSampleFormatS16, channels, 9, 1, 20, kNoTime, kNoTime));
+ EXPECT_EQ(24, buffer.frames());
+
+ // Read 6 frames from the buffer. Data is interleaved, so ch[0] should be
+ // 1, 3, 5, 7, 9, 11, and ch[1] should be 2, 4, 6, 8, 10, 12.
+ // Data is converted to float from -1.0 to 1.0 based on int16 range.
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
+ EXPECT_EQ(6, buffer.ReadFrames(6, 0, bus.get()));
+ EXPECT_EQ(18, buffer.frames());
+ VerifyResult(bus->channel(0), 6, 1.0f / kint16max, 2.0f / kint16max);
+ VerifyResult(bus->channel(1), 6, 2.0f / kint16max, 2.0f / kint16max);
+}
+
+TEST(AudioBufferQueueTest, ReadS32) {
+ const int channels = 2;
+ const base::TimeDelta kNoTime = kNoTimestamp();
+ AudioBufferQueue buffer;
+
+ // Add 24 frames of data.
+ buffer.Append(MakeInterleavedAudioBuffer<int32>(
+ kSampleFormatS32, channels, 1, 1, 4, kNoTime, kNoTime));
+ buffer.Append(MakeInterleavedAudioBuffer<int32>(
+ kSampleFormatS32, channels, 9, 1, 20, kNoTime, kNoTime));
+ EXPECT_EQ(24, buffer.frames());
+
+ // Read 6 frames from the buffer. Data is interleaved, so ch[0] should be
+  // 1, 3, 5, 7, 9, 11, and ch[1] should be 2, 4, 6, 8, 10, 12.
+ // Data is converted to float from -1.0 to 1.0 based on int32 range.
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
+ EXPECT_EQ(6, buffer.ReadFrames(6, 0, bus.get()));
+ EXPECT_EQ(18, buffer.frames());
+ VerifyResult(bus->channel(0), 6, 1.0f / kint32max, 2.0f / kint32max);
+ VerifyResult(bus->channel(1), 6, 2.0f / kint32max, 2.0f / kint32max);
+
+ // Read the next 2 frames.
+ EXPECT_EQ(2, buffer.ReadFrames(2, 0, bus.get()));
+ EXPECT_EQ(16, buffer.frames());
+ VerifyResult(bus->channel(0), 2, 13.0f / kint32max, 2.0f / kint32max);
+ VerifyResult(bus->channel(1), 2, 14.0f / kint32max, 2.0f / kint32max);
+}
+
+TEST(AudioBufferQueueTest, ReadF32Planar) {
+ const int channels = 2;
+ const base::TimeDelta kNoTime = kNoTimestamp();
+ AudioBufferQueue buffer;
+
+ // Add 14 frames of data.
+ buffer.Append(MakePlanarAudioBuffer<float>(
+ kSampleFormatPlanarF32, channels, 1.0f, 1.0f, 4, kNoTime, kNoTime));
+ buffer.Append(MakePlanarAudioBuffer<float>(
+ kSampleFormatPlanarF32, channels, 50.0f, 1.0f, 10, kNoTime, kNoTime));
+ EXPECT_EQ(14, buffer.frames());
+
+ // Read 6 frames from the buffer. F32 is planar, so ch[0] should be
+ // 1, 2, 3, 4, 50, 51, and ch[1] should be 5, 6, 7, 8, 60, 61.
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
+ EXPECT_EQ(6, buffer.ReadFrames(6, 0, bus.get()));
+ EXPECT_EQ(8, buffer.frames());
+ VerifyResult(bus->channel(0), 4, 1.0f, 1.0f);
+ VerifyResult(bus->channel(0) + 4, 2, 50.0f, 1.0f);
+ VerifyResult(bus->channel(1), 4, 5.0f, 1.0f);
+ VerifyResult(bus->channel(1) + 4, 2, 60.0f, 1.0f);
+}
+
+TEST(AudioBufferQueueTest, ReadS16Planar) {
+ const int channels = 2;
+ const base::TimeDelta kNoTime = kNoTimestamp();
+ AudioBufferQueue buffer;
+
+ // Add 24 frames of data.
+ buffer.Append(MakePlanarAudioBuffer<int16>(
+ kSampleFormatPlanarS16, channels, 1, 1, 4, kNoTime, kNoTime));
+ buffer.Append(MakePlanarAudioBuffer<int16>(
+ kSampleFormatPlanarS16, channels, 100, 5, 20, kNoTime, kNoTime));
+ EXPECT_EQ(24, buffer.frames());
+
+ // Read 6 frames from the buffer. Data is planar, so ch[0] should be
+ // 1, 2, 3, 4, 100, 105, and ch[1] should be 5, 6, 7, 8, 200, 205.
+ // Data is converted to float from -1.0 to 1.0 based on int16 range.
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
+ EXPECT_EQ(6, buffer.ReadFrames(6, 0, bus.get()));
+ EXPECT_EQ(18, buffer.frames());
+ VerifyResult(bus->channel(0), 4, 1.0f / kint16max, 1.0f / kint16max);
+ VerifyResult(bus->channel(0) + 4, 2, 100.0f / kint16max, 5.0f / kint16max);
+ VerifyResult(bus->channel(1), 4, 5.0f / kint16max, 1.0f / kint16max);
+ VerifyResult(bus->channel(1) + 4, 2, 200.0f / kint16max, 5.0f / kint16max);
+}
+
+TEST(AudioBufferQueueTest, ReadManyChannels) {
+ const int channels = 16;
+ const base::TimeDelta kNoTime = kNoTimestamp();
+ AudioBufferQueue buffer;
+
+ // Add 76 frames of data.
+ buffer.Append(MakeInterleavedAudioBuffer<float>(
+ kSampleFormatF32, channels, 0.0f, 1.0f, 6, kNoTime, kNoTime));
+ buffer.Append(MakeInterleavedAudioBuffer<float>(
+ kSampleFormatF32, channels, 6.0f * channels, 1.0f, 10, kNoTime, kNoTime));
+ buffer.Append(MakeInterleavedAudioBuffer<float>(kSampleFormatF32,
+ channels,
+ 16.0f * channels,
+ 1.0f,
+ 60,
+ kNoTime,
+ kNoTime));
+ EXPECT_EQ(76, buffer.frames());
+
+  // Read 30 frames from the buffer. F32 is interleaved, so ch[0] should be
+  // 0, 16, 32, ..., ch[1] should be 1, 17, 33, ..., and so on. Check every
+  // channel.
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
+ EXPECT_EQ(30, buffer.ReadFrames(30, 0, bus.get()));
+ EXPECT_EQ(46, buffer.frames());
+ for (int i = 0; i < channels; ++i) {
+ VerifyResult(bus->channel(i), 30, static_cast<float>(i), 16.0f);
+ }
+}
+
+TEST(AudioBufferQueueTest, Peek) {
+ const int channels = 4;
+ const base::TimeDelta kNoTime = kNoTimestamp();
+ AudioBufferQueue buffer;
+
+ // Add 60 frames of data.
+ buffer.Append(MakeInterleavedAudioBuffer<float>(
+ kSampleFormatF32, channels, 0.0f, 1.0f, 60, kNoTime, kNoTime));
+ EXPECT_EQ(60, buffer.frames());
+
+  // Peek at all 60 frames, then at the first 30; neither advances the
+  // current position.
+ scoped_ptr<AudioBus> bus1 = AudioBus::Create(channels, 100);
+ EXPECT_EQ(60, buffer.frames());
+ EXPECT_EQ(60, buffer.PeekFrames(100, 0, 0, bus1.get()));
+ EXPECT_EQ(30, buffer.PeekFrames(30, 0, 0, bus1.get()));
+ EXPECT_EQ(60, buffer.frames());
+
+ // Now read the next 30 frames (which should be the same as those peeked at).
+ scoped_ptr<AudioBus> bus2 = AudioBus::Create(channels, 100);
+ EXPECT_EQ(30, buffer.ReadFrames(30, 0, bus2.get()));
+ for (int i = 0; i < channels; ++i) {
+ VerifyResult(bus1->channel(i),
+ 30,
+ static_cast<float>(i),
+ static_cast<float>(channels));
+ VerifyResult(bus2->channel(i),
+ 30,
+ static_cast<float>(i),
+ static_cast<float>(channels));
+ }
+
+  // Peek 5 frames, starting 10 frames beyond the current position.
+ EXPECT_EQ(5, buffer.PeekFrames(5, 10, 0, bus1.get()));
+ for (int i = 0; i < channels; ++i) {
+ VerifyResult(bus1->channel(i),
+ 5,
+ static_cast<float>(i + 40 * channels),
+ static_cast<float>(channels));
+ }
+
+ // Peek to the end of the buffer.
+ EXPECT_EQ(30, buffer.frames());
+ EXPECT_EQ(30, buffer.PeekFrames(100, 0, 0, bus1.get()));
+ EXPECT_EQ(30, buffer.PeekFrames(30, 0, 0, bus1.get()));
+}
+
+TEST(AudioBufferQueueTest, Time) {
+ const int channels = 2;
+ const base::TimeDelta start_time1;
+ const base::TimeDelta start_time2 = base::TimeDelta::FromSeconds(30);
+ const base::TimeDelta duration = base::TimeDelta::FromSeconds(10);
+ AudioBufferQueue buffer;
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
+
+ // Add two buffers (second one added later):
+ // first: start=0s, duration=10s
+ // second: start=30s, duration=10s
+ buffer.Append(MakeInterleavedAudioBuffer<int16>(
+ kSampleFormatS16, channels, 1, 1, 10, start_time1, duration));
+ EXPECT_EQ(10, buffer.frames());
+
+ // Check starting time.
+ EXPECT_EQ(start_time1, buffer.current_time());
+
+  // Read 2 frames, should be 2s in (since duration is 1s per frame).
+ EXPECT_EQ(2, buffer.ReadFrames(2, 0, bus.get()));
+ EXPECT_EQ(start_time1 + base::TimeDelta::FromSeconds(2),
+ buffer.current_time());
+
+ // Skip 2 frames.
+ buffer.SeekFrames(2);
+ EXPECT_EQ(start_time1 + base::TimeDelta::FromSeconds(4),
+ buffer.current_time());
+
+ // Add second buffer for more data.
+ buffer.Append(MakeInterleavedAudioBuffer<int16>(
+ kSampleFormatS16, channels, 1, 1, 10, start_time2, duration));
+ EXPECT_EQ(16, buffer.frames());
+
+ // Read until almost the end of buffer1.
+ EXPECT_EQ(5, buffer.ReadFrames(5, 0, bus.get()));
+ EXPECT_EQ(start_time1 + base::TimeDelta::FromSeconds(9),
+ buffer.current_time());
+
+  // Read 1 frame, so time moved to buffer2.
+ EXPECT_EQ(1, buffer.ReadFrames(1, 0, bus.get()));
+ EXPECT_EQ(start_time2, buffer.current_time());
+
+ // Read all 10 frames in buffer2, timestamp should be last time from buffer2.
+ EXPECT_EQ(10, buffer.ReadFrames(10, 0, bus.get()));
+ EXPECT_EQ(start_time2 + base::TimeDelta::FromSeconds(10),
+ buffer.current_time());
+
+  // Try to read more frames (which don't exist); the timestamp should
+  // remain unchanged.
+ EXPECT_EQ(0, buffer.ReadFrames(5, 0, bus.get()));
+ EXPECT_EQ(start_time2 + base::TimeDelta::FromSeconds(10),
+ buffer.current_time());
+}
+
+TEST(AudioBufferQueueTest, NoTime) {
+ const int channels = 2;
+ const base::TimeDelta kNoTime = kNoTimestamp();
+ AudioBufferQueue buffer;
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
+
+ // Add two buffers with no timestamps. Time should always be unknown.
+ buffer.Append(MakeInterleavedAudioBuffer<int16>(
+ kSampleFormatS16, channels, 1, 1, 10, kNoTime, kNoTime));
+ buffer.Append(MakeInterleavedAudioBuffer<int16>(
+ kSampleFormatS16, channels, 1, 1, 10, kNoTime, kNoTime));
+ EXPECT_EQ(20, buffer.frames());
+
+ // Check starting time.
+ EXPECT_EQ(kNoTime, buffer.current_time());
+
+ // Read 2 frames.
+ EXPECT_EQ(2, buffer.ReadFrames(2, 0, bus.get()));
+ EXPECT_EQ(kNoTime, buffer.current_time());
+
+ // Skip 2 frames.
+ buffer.SeekFrames(2);
+ EXPECT_EQ(kNoTime, buffer.current_time());
+
+ // Read until almost the end of buffer1.
+ EXPECT_EQ(5, buffer.ReadFrames(5, 0, bus.get()));
+ EXPECT_EQ(kNoTime, buffer.current_time());
+
+  // Read 1 frame, so time moved to buffer2.
+ EXPECT_EQ(1, buffer.ReadFrames(1, 0, bus.get()));
+ EXPECT_EQ(kNoTime, buffer.current_time());
+
+ // Read all 10 frames in buffer2.
+ EXPECT_EQ(10, buffer.ReadFrames(10, 0, bus.get()));
+ EXPECT_EQ(kNoTime, buffer.current_time());
+
+  // Try to read more frames (which don't exist); the timestamp should
+  // remain unchanged.
+ EXPECT_EQ(0, buffer.ReadFrames(5, 0, bus.get()));
+ EXPECT_EQ(kNoTime, buffer.current_time());
+}
+
+} // namespace media
diff --git a/chromium/media/base/audio_buffer_unittest.cc b/chromium/media/base/audio_buffer_unittest.cc
new file mode 100644
index 00000000000..473778a6b53
--- /dev/null
+++ b/chromium/media/base/audio_buffer_unittest.cc
@@ -0,0 +1,290 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+#include "media/base/audio_buffer.h"
+#include "media/base/audio_bus.h"
+#include "media/base/test_helpers.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+static void VerifyResult(float* channel_data,
+ int frames,
+ float start,
+ float increment) {
+ for (int i = 0; i < frames; ++i) {
+ SCOPED_TRACE(base::StringPrintf(
+ "i=%d/%d start=%f, increment=%f", i, frames, start, increment));
+ ASSERT_EQ(channel_data[i], start);
+ start += increment;
+ }
+}
+
+TEST(AudioBufferTest, CopyFrom) {
+ const int channels = 1;
+ const int frames = 8;
+ const base::TimeDelta start_time;
+ const base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
+ scoped_refptr<AudioBuffer> buffer = MakeInterleavedAudioBuffer<uint8>(
+ kSampleFormatU8, channels, 1, 1, frames, start_time, duration);
+ EXPECT_EQ(frames, buffer->frame_count());
+ EXPECT_EQ(buffer->timestamp(), start_time);
+ EXPECT_EQ(buffer->duration().InSeconds(), frames);
+ EXPECT_FALSE(buffer->end_of_stream());
+}
+
+TEST(AudioBufferTest, CreateEOSBuffer) {
+ scoped_refptr<AudioBuffer> buffer = AudioBuffer::CreateEOSBuffer();
+ EXPECT_TRUE(buffer->end_of_stream());
+}
+
+TEST(AudioBufferTest, FrameSize) {
+ const uint8 kTestData[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
+ 27, 28, 29, 30, 31 };
+ const base::TimeDelta kTimestampA = base::TimeDelta::FromMicroseconds(1337);
+ const base::TimeDelta kTimestampB = base::TimeDelta::FromMicroseconds(1234);
+
+ const uint8* const data[] = { kTestData };
+ scoped_refptr<AudioBuffer> buffer = AudioBuffer::CopyFrom(
+ kSampleFormatU8, 2, 16, data, kTimestampA, kTimestampB);
+ EXPECT_EQ(16, buffer->frame_count()); // 2 channels of 8-bit data
+
+ buffer = AudioBuffer::CopyFrom(
+ kSampleFormatF32, 4, 2, data, kTimestampA, kTimestampB);
+ EXPECT_EQ(2, buffer->frame_count()); // now 4 channels of 32-bit data
+}
+
+TEST(AudioBufferTest, ReadU8) {
+ const int channels = 4;
+ const int frames = 4;
+ const base::TimeDelta start_time;
+ const base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
+ scoped_refptr<AudioBuffer> buffer = MakeInterleavedAudioBuffer<uint8>(
+ kSampleFormatU8, channels, 128, 1, frames, start_time, duration);
+
+ // Read all 4 frames from the buffer. Data is interleaved, so ch[0] should be
+ // 128, 132, 136, 140, other channels similar. However, values are converted
+ // from [0, 255] to [-1.0, 1.0] with a bias of 128. Thus the first buffer
+ // value should be 0.0, then 1/127, 2/127, etc.
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
+ buffer->ReadFrames(frames, 0, 0, bus.get());
+ VerifyResult(bus->channel(0), frames, 0.0f, 4.0f / 127.0f);
+ VerifyResult(bus->channel(1), frames, 1.0f / 127.0f, 4.0f / 127.0f);
+ VerifyResult(bus->channel(2), frames, 2.0f / 127.0f, 4.0f / 127.0f);
+ VerifyResult(bus->channel(3), frames, 3.0f / 127.0f, 4.0f / 127.0f);
+}
+
+TEST(AudioBufferTest, ReadS16) {
+ const int channels = 2;
+ const int frames = 10;
+ const base::TimeDelta start_time;
+ const base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
+ scoped_refptr<AudioBuffer> buffer = MakeInterleavedAudioBuffer<int16>(
+ kSampleFormatS16, channels, 1, 1, frames, start_time, duration);
+
+ // Read 6 frames from the buffer. Data is interleaved, so ch[0] should be 1,
+ // 3, 5, 7, 9, 11, and ch[1] should be 2, 4, 6, 8, 10, 12. Data is converted
+ // to float from -1.0 to 1.0 based on int16 range.
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
+ buffer->ReadFrames(6, 0, 0, bus.get());
+ VerifyResult(bus->channel(0), 6, 1.0f / kint16max, 2.0f / kint16max);
+ VerifyResult(bus->channel(1), 6, 2.0f / kint16max, 2.0f / kint16max);
+
+ // Now read the same data one frame at a time.
+ bus = AudioBus::Create(channels, 100);
+ for (int i = 0; i < frames; ++i) {
+ buffer->ReadFrames(1, i, i, bus.get());
+ }
+ VerifyResult(bus->channel(0), frames, 1.0f / kint16max, 2.0f / kint16max);
+ VerifyResult(bus->channel(1), frames, 2.0f / kint16max, 2.0f / kint16max);
+}
+
+TEST(AudioBufferTest, ReadS32) {
+ const int channels = 2;
+ const int frames = 6;
+ const base::TimeDelta start_time;
+ const base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
+ scoped_refptr<AudioBuffer> buffer = MakeInterleavedAudioBuffer<int32>(
+ kSampleFormatS32, channels, 1, 1, frames, start_time, duration);
+
+ // Read 6 frames from the buffer. Data is interleaved, so ch[0] should be 1,
+ // 3, 5, 7, 9, 11, and ch[1] should be 2, 4, 6, 8, 10, 12. Data is converted
+ // to float from -1.0 to 1.0 based on int32 range.
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
+ buffer->ReadFrames(frames, 0, 0, bus.get());
+ VerifyResult(bus->channel(0), frames, 1.0f / kint32max, 2.0f / kint32max);
+ VerifyResult(bus->channel(1), frames, 2.0f / kint32max, 2.0f / kint32max);
+
+ // Now read 2 frames starting at frame offset 3. ch[0] should be 7, 9, and
+ // ch[1] should be 8, 10.
+ buffer->ReadFrames(2, 3, 0, bus.get());
+ VerifyResult(bus->channel(0), 2, 7.0f / kint32max, 2.0f / kint32max);
+ VerifyResult(bus->channel(1), 2, 8.0f / kint32max, 2.0f / kint32max);
+}
+
+TEST(AudioBufferTest, ReadF32) {
+ const int channels = 2;
+ const int frames = 20;
+ const base::TimeDelta start_time;
+ const base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
+ scoped_refptr<AudioBuffer> buffer = MakeInterleavedAudioBuffer<float>(
+ kSampleFormatF32, channels, 1.0f, 1.0f, frames, start_time, duration);
+
+ // Read first 10 frames from the buffer. F32 is interleaved, so ch[0] should
+ // be 1, 3, 5, ... and ch[1] should be 2, 4, 6, ...
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
+ buffer->ReadFrames(10, 0, 0, bus.get());
+ VerifyResult(bus->channel(0), 10, 1.0f, 2.0f);
+ VerifyResult(bus->channel(1), 10, 2.0f, 2.0f);
+
+ // Read second 10 frames.
+ bus = AudioBus::Create(channels, 100);
+ buffer->ReadFrames(10, 10, 0, bus.get());
+ VerifyResult(bus->channel(0), 10, 21.0f, 2.0f);
+ VerifyResult(bus->channel(1), 10, 22.0f, 2.0f);
+}
+
+TEST(AudioBufferTest, ReadS16Planar) {
+ const int channels = 2;
+ const int frames = 20;
+ const base::TimeDelta start_time;
+ const base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
+ scoped_refptr<AudioBuffer> buffer = MakePlanarAudioBuffer<int16>(
+ kSampleFormatPlanarS16, channels, 1, 1, frames, start_time, duration);
+
+ // Read 6 frames from the buffer. Data is planar, so ch[0] should be 1, 2, 3,
+ // 4, 5, 6, and ch[1] should be 21, 22, 23, 24, 25, 26. Data is converted to
+ // float from -1.0 to 1.0 based on int16 range.
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
+ buffer->ReadFrames(6, 0, 0, bus.get());
+ VerifyResult(bus->channel(0), 6, 1.0f / kint16max, 1.0f / kint16max);
+ VerifyResult(bus->channel(1), 6, 21.0f / kint16max, 1.0f / kint16max);
+
+ // Read all the frames backwards, one by one. ch[0] should be 20, 19, ...
+ bus = AudioBus::Create(channels, 100);
+ for (int i = 0; i < frames; ++i) {
+ buffer->ReadFrames(1, frames - i - 1, i, bus.get());
+ }
+ VerifyResult(bus->channel(0), frames, 20.0f / kint16max, -1.0f / kint16max);
+ VerifyResult(bus->channel(1), frames, 40.0f / kint16max, -1.0f / kint16max);
+
+ // Read 0 frames with different offsets. Existing data in AudioBus should be
+ // unchanged.
+ buffer->ReadFrames(0, 0, 0, bus.get());
+ buffer->ReadFrames(0, 0, 10, bus.get());
+ buffer->ReadFrames(0, 10, 0, bus.get());
+ VerifyResult(bus->channel(0), frames, 20.0f / kint16max, -1.0f / kint16max);
+ VerifyResult(bus->channel(1), frames, 40.0f / kint16max, -1.0f / kint16max);
+}
+
+TEST(AudioBufferTest, ReadF32Planar) {
+ const int channels = 4;
+ const int frames = 100;
+ const base::TimeDelta start_time;
+ const base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
+ scoped_refptr<AudioBuffer> buffer =
+ MakePlanarAudioBuffer<float>(kSampleFormatPlanarF32,
+ channels,
+ 1.0f,
+ 1.0f,
+ frames,
+ start_time,
+ duration);
+
+ // Read all 100 frames from the buffer. F32 is planar, so ch[0] should be 1,
+ // 2, 3, 4, ..., ch[1] should be 101, 102, 103, ..., and so on for all 4
+ // channels.
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
+ buffer->ReadFrames(frames, 0, 0, bus.get());
+ VerifyResult(bus->channel(0), frames, 1.0f, 1.0f);
+ VerifyResult(bus->channel(1), frames, 101.0f, 1.0f);
+ VerifyResult(bus->channel(2), frames, 201.0f, 1.0f);
+ VerifyResult(bus->channel(3), frames, 301.0f, 1.0f);
+
+ // Now read 20 frames from the middle of the buffer.
+ bus = AudioBus::Create(channels, 100);
+ buffer->ReadFrames(20, 50, 0, bus.get());
+ VerifyResult(bus->channel(0), 20, 51.0f, 1.0f);
+ VerifyResult(bus->channel(1), 20, 151.0f, 1.0f);
+ VerifyResult(bus->channel(2), 20, 251.0f, 1.0f);
+ VerifyResult(bus->channel(3), 20, 351.0f, 1.0f);
+}
+
+TEST(AudioBufferTest, EmptyBuffer) {
+ const int channels = 4;
+ const int frames = 100;
+ const base::TimeDelta start_time;
+ const base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
+ scoped_refptr<AudioBuffer> buffer = AudioBuffer::CreateEmptyBuffer(
+ channels, frames, start_time, duration);
+ EXPECT_EQ(frames, buffer->frame_count());
+ EXPECT_EQ(start_time, buffer->timestamp());
+ EXPECT_EQ(frames, buffer->duration().InSeconds());
+ EXPECT_FALSE(buffer->end_of_stream());
+
+ // Read all 100 frames from the buffer. All data should be 0.
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
+ buffer->ReadFrames(frames, 0, 0, bus.get());
+ VerifyResult(bus->channel(0), frames, 0.0f, 0.0f);
+ VerifyResult(bus->channel(1), frames, 0.0f, 0.0f);
+ VerifyResult(bus->channel(2), frames, 0.0f, 0.0f);
+ VerifyResult(bus->channel(3), frames, 0.0f, 0.0f);
+}
+
+TEST(AudioBufferTest, Trim) {
+ const int channels = 4;
+ const int frames = 100;
+ const base::TimeDelta start_time;
+ const base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
+ scoped_refptr<AudioBuffer> buffer =
+ MakePlanarAudioBuffer<float>(kSampleFormatPlanarF32,
+ channels,
+ 1.0f,
+ 1.0f,
+ frames,
+ start_time,
+ duration);
+ EXPECT_EQ(frames, buffer->frame_count());
+ EXPECT_EQ(start_time, buffer->timestamp());
+ EXPECT_EQ(frames, buffer->duration().InSeconds());
+
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
+ buffer->ReadFrames(20, 0, 0, bus.get());
+ VerifyResult(bus->channel(0), 20, 1.0f, 1.0f);
+
+ // Trim off 10 frames from the start.
+ buffer->TrimStart(10);
+ EXPECT_EQ(buffer->frame_count(), frames - 10);
+ EXPECT_EQ(buffer->timestamp(), start_time + base::TimeDelta::FromSeconds(10));
+ EXPECT_EQ(buffer->duration(), base::TimeDelta::FromSeconds(90));
+ buffer->ReadFrames(20, 0, 0, bus.get());
+ VerifyResult(bus->channel(0), 20, 11.0f, 1.0f);
+
+ // Trim off 10 frames from the end.
+ buffer->TrimEnd(10);
+ EXPECT_EQ(buffer->frame_count(), frames - 20);
+ EXPECT_EQ(buffer->timestamp(), start_time + base::TimeDelta::FromSeconds(10));
+ EXPECT_EQ(buffer->duration(), base::TimeDelta::FromSeconds(80));
+ buffer->ReadFrames(20, 0, 0, bus.get());
+ VerifyResult(bus->channel(0), 20, 11.0f, 1.0f);
+
+ // Trim off 50 more from the start.
+ buffer->TrimStart(50);
+ EXPECT_EQ(buffer->frame_count(), frames - 70);
+ EXPECT_EQ(buffer->timestamp(), start_time + base::TimeDelta::FromSeconds(60));
+ EXPECT_EQ(buffer->duration(), base::TimeDelta::FromSeconds(30));
+ buffer->ReadFrames(10, 0, 0, bus.get());
+ VerifyResult(bus->channel(0), 10, 61.0f, 1.0f);
+
+ // Trim off the last 30 frames.
+ buffer->TrimEnd(30);
+ EXPECT_EQ(buffer->frame_count(), 0);
+ EXPECT_EQ(buffer->timestamp(), start_time + base::TimeDelta::FromSeconds(60));
+ EXPECT_EQ(buffer->duration(), base::TimeDelta::FromSeconds(0));
+}
+
+} // namespace media
diff --git a/chromium/media/base/audio_bus.cc b/chromium/media/base/audio_bus.cc
new file mode 100644
index 00000000000..518d83cb4de
--- /dev/null
+++ b/chromium/media/base/audio_bus.cc
@@ -0,0 +1,331 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/audio_bus.h"
+
+#include "base/logging.h"
+#include "media/audio/audio_parameters.h"
+#include "media/base/limits.h"
+#include "media/base/vector_math.h"
+
+namespace media {
+
+static const uint8 kUint8Bias = 128;
+
+static bool IsAligned(void* ptr) {
+ return (reinterpret_cast<uintptr_t>(ptr) &
+ (AudioBus::kChannelAlignment - 1)) == 0U;
+}
+
+// Calculates the required size for an AudioBus with the given params and sets
+// |out_aligned_frames| to the actual frame length of each channel array.
+static int CalculateMemorySizeInternal(int channels, int frames,
+ int* out_aligned_frames) {
+ // Choose a size such that each channel will be aligned by
+ // kChannelAlignment when stored in a contiguous block.
+ int aligned_frames =
+ ((frames * sizeof(float) + AudioBus::kChannelAlignment - 1) &
+ ~(AudioBus::kChannelAlignment - 1)) / sizeof(float);
+
+ if (out_aligned_frames)
+ *out_aligned_frames = aligned_frames;
+
+ return sizeof(float) * channels * aligned_frames;
+}
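+
+// For example, a 2 channel, 64 frame bus needs 64 * sizeof(float) = 256
+// bytes per channel, already a multiple of kChannelAlignment, so the total
+// is 512 bytes; 100 frames would round up to 104 frames (416 bytes) per
+// channel.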
+
+// |Format| is the source type. If a bias is present, |Fixed| must be a
+// type larger than |Format| such that operations can be performed without
+// overflowing. Without a bias |Fixed| must be the same as |Format|.
+template<class Format, class Fixed, Format Bias>
+static void FromInterleavedInternal(const void* src, int start_frame,
+ int frames, AudioBus* dest,
+ float min, float max) {
+ COMPILE_ASSERT((Bias == 0 && sizeof(Fixed) == sizeof(Format)) ||
+ sizeof(Fixed) > sizeof(Format), invalid_deinterleave_types);
+ const Format* source = static_cast<const Format*>(src);
+ const int channels = dest->channels();
+ for (int ch = 0; ch < channels; ++ch) {
+ float* channel_data = dest->channel(ch);
+ for (int i = start_frame, offset = ch; i < start_frame + frames;
+ ++i, offset += channels) {
+ const Fixed v = static_cast<Fixed>(source[offset]) - Bias;
+ channel_data[i] = v * (v < 0 ? -min : max);
+ }
+ }
+}
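+
+// For example, a uint8 conversion would instantiate
+// FromInterleavedInternal<uint8, int16, kUint8Bias>, so subtracting the bias
+// of 128 happens in int16 and cannot underflow the unsigned sample type.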
+
+// |Format| is the destination type. If a bias is present, |Fixed| must be a
+// type larger than |Format| such that operations can be performed without
+// overflowing. Without a bias |Fixed| must be the same as |Format|.
+template<class Format, class Fixed, Format Bias>
+static void ToInterleavedInternal(const AudioBus* source, int start_frame,
+ int frames, void* dst, Fixed min, Fixed max) {
+ COMPILE_ASSERT((Bias == 0 && sizeof(Fixed) == sizeof(Format)) ||
+ sizeof(Fixed) > sizeof(Format), invalid_interleave_types);
+ Format* dest = static_cast<Format*>(dst);
+ const int channels = source->channels();
+ for (int ch = 0; ch < channels; ++ch) {
+ const float* channel_data = source->channel(ch);
+ for (int i = start_frame, offset = ch; i < start_frame + frames;
+ ++i, offset += channels) {
+ const float v = channel_data[i];
+
+ Fixed sample;
+ if (v < 0)
+ sample = v <= -1 ? min : static_cast<Fixed>(-v * min);
+ else
+ sample = v >= 1 ? max : static_cast<Fixed>(v * max);
+
+ dest[offset] = static_cast<Format>(sample) + Bias;
+ }
+ }
+}
+
+static void ValidateConfig(size_t channels, int frames) {
+ CHECK_GT(frames, 0);
+ CHECK_LE(channels, static_cast<size_t>(limits::kMaxChannels));
+}
+
+static void CheckOverflow(int start_frame, int frames, int total_frames) {
+ CHECK_GE(start_frame, 0);
+ CHECK_GE(frames, 0);
+ CHECK_GT(total_frames, 0);
+ int sum = start_frame + frames;
+ CHECK_LE(sum, total_frames);
+ CHECK_GE(sum, 0);
+}
+
+AudioBus::AudioBus(int channels, int frames)
+ : frames_(frames),
+ can_set_channel_data_(false) {
+ ValidateConfig(channels, frames_);
+
+ int aligned_frames = 0;
+ int size = CalculateMemorySizeInternal(channels, frames, &aligned_frames);
+
+ data_.reset(static_cast<float*>(base::AlignedAlloc(
+ size, AudioBus::kChannelAlignment)));
+
+ BuildChannelData(channels, aligned_frames, data_.get());
+}
+
+AudioBus::AudioBus(int channels, int frames, float* data)
+ : frames_(frames),
+ can_set_channel_data_(false) {
+ // Since |data| may have come from an external source, ensure it's valid.
+ CHECK(data);
+ ValidateConfig(channels, frames_);
+
+ int aligned_frames = 0;
+ CalculateMemorySizeInternal(channels, frames, &aligned_frames);
+
+ BuildChannelData(channels, aligned_frames, data);
+}
+
+AudioBus::AudioBus(int frames, const std::vector<float*>& channel_data)
+ : channel_data_(channel_data),
+ frames_(frames),
+ can_set_channel_data_(false) {
+ ValidateConfig(channel_data_.size(), frames_);
+
+  // Sanity check each wrapped channel for proper alignment.
+ for (size_t i = 0; i < channel_data_.size(); ++i)
+ DCHECK(IsAligned(channel_data_[i]));
+}
+
+AudioBus::AudioBus(int channels)
+ : channel_data_(channels),
+ frames_(0),
+ can_set_channel_data_(true) {
+ for (size_t i = 0; i < channel_data_.size(); ++i)
+ channel_data_[i] = NULL;
+}
+
+AudioBus::~AudioBus() {}
+
+scoped_ptr<AudioBus> AudioBus::Create(int channels, int frames) {
+ return scoped_ptr<AudioBus>(new AudioBus(channels, frames));
+}
+
+scoped_ptr<AudioBus> AudioBus::Create(const AudioParameters& params) {
+ return scoped_ptr<AudioBus>(new AudioBus(
+ params.channels(), params.frames_per_buffer()));
+}
+
+scoped_ptr<AudioBus> AudioBus::CreateWrapper(int channels) {
+ return scoped_ptr<AudioBus>(new AudioBus(channels));
+}
+
+scoped_ptr<AudioBus> AudioBus::WrapVector(
+ int frames, const std::vector<float*>& channel_data) {
+ return scoped_ptr<AudioBus>(new AudioBus(frames, channel_data));
+}
+
+scoped_ptr<AudioBus> AudioBus::WrapMemory(int channels, int frames,
+ void* data) {
+ // |data| must be aligned by AudioBus::kChannelAlignment.
+ CHECK(IsAligned(data));
+ return scoped_ptr<AudioBus>(new AudioBus(
+ channels, frames, static_cast<float*>(data)));
+}
+
+scoped_ptr<AudioBus> AudioBus::WrapMemory(const AudioParameters& params,
+ void* data) {
+ // |data| must be aligned by AudioBus::kChannelAlignment.
+ CHECK(IsAligned(data));
+ return scoped_ptr<AudioBus>(new AudioBus(
+ params.channels(), params.frames_per_buffer(),
+ static_cast<float*>(data)));
+}
+
+void AudioBus::SetChannelData(int channel, float* data) {
+ CHECK(can_set_channel_data_);
+ CHECK(data);
+ CHECK_GE(channel, 0);
+ CHECK_LT(static_cast<size_t>(channel), channel_data_.size());
+ DCHECK(IsAligned(data));
+ channel_data_[channel] = data;
+}
+
+void AudioBus::set_frames(int frames) {
+ CHECK(can_set_channel_data_);
+ frames_ = frames;
+}
+
+void AudioBus::ZeroFramesPartial(int start_frame, int frames) {
+ CheckOverflow(start_frame, frames, frames_);
+
+ if (frames <= 0)
+ return;
+
+ for (size_t i = 0; i < channel_data_.size(); ++i) {
+ memset(channel_data_[i] + start_frame, 0,
+ frames * sizeof(*channel_data_[i]));
+ }
+}
+
+void AudioBus::ZeroFrames(int frames) {
+ ZeroFramesPartial(0, frames);
+}
+
+void AudioBus::Zero() {
+ ZeroFrames(frames_);
+}
+
+int AudioBus::CalculateMemorySize(const AudioParameters& params) {
+ return CalculateMemorySizeInternal(
+ params.channels(), params.frames_per_buffer(), NULL);
+}
+
+int AudioBus::CalculateMemorySize(int channels, int frames) {
+ return CalculateMemorySizeInternal(channels, frames, NULL);
+}
+
+void AudioBus::BuildChannelData(int channels, int aligned_frames, float* data) {
+ DCHECK(IsAligned(data));
+ DCHECK_EQ(channel_data_.size(), 0U);
+  // Separate audio data out into channels for easy lookup later.
+ channel_data_.reserve(channels);
+ for (int i = 0; i < channels; ++i)
+ channel_data_.push_back(data + i * aligned_frames);
+}
+
+// TODO(dalecurtis): See if intrinsic optimizations help any here.
+void AudioBus::FromInterleavedPartial(const void* source, int start_frame,
+ int frames, int bytes_per_sample) {
+ CheckOverflow(start_frame, frames, frames_);
+ switch (bytes_per_sample) {
+ case 1:
+ FromInterleavedInternal<uint8, int16, kUint8Bias>(
+ source, start_frame, frames, this,
+ 1.0f / kint8min, 1.0f / kint8max);
+ break;
+ case 2:
+ FromInterleavedInternal<int16, int16, 0>(
+ source, start_frame, frames, this,
+ 1.0f / kint16min, 1.0f / kint16max);
+ break;
+ case 4:
+ FromInterleavedInternal<int32, int32, 0>(
+ source, start_frame, frames, this,
+ 1.0f / kint32min, 1.0f / kint32max);
+ break;
+ default:
+ NOTREACHED() << "Unsupported bytes per sample encountered.";
+ ZeroFramesPartial(start_frame, frames);
+ return;
+ }
+
+ // Don't clear remaining frames if this is a partial deinterleave.
+ if (!start_frame) {
+ // Zero any remaining frames.
+ ZeroFramesPartial(frames, frames_ - frames);
+ }
+}
+
+void AudioBus::FromInterleaved(const void* source, int frames,
+ int bytes_per_sample) {
+ FromInterleavedPartial(source, 0, frames, bytes_per_sample);
+}
+
+void AudioBus::ToInterleaved(int frames, int bytes_per_sample,
+ void* dest) const {
+ ToInterleavedPartial(0, frames, bytes_per_sample, dest);
+}
+
+// TODO(dalecurtis): See if intrinsic optimizations help any here.
+void AudioBus::ToInterleavedPartial(int start_frame, int frames,
+ int bytes_per_sample, void* dest) const {
+ CheckOverflow(start_frame, frames, frames_);
+ switch (bytes_per_sample) {
+ case 1:
+ ToInterleavedInternal<uint8, int16, kUint8Bias>(
+ this, start_frame, frames, dest, kint8min, kint8max);
+ break;
+ case 2:
+ ToInterleavedInternal<int16, int16, 0>(
+ this, start_frame, frames, dest, kint16min, kint16max);
+ break;
+ case 4:
+ ToInterleavedInternal<int32, int32, 0>(
+ this, start_frame, frames, dest, kint32min, kint32max);
+ break;
+ default:
+ NOTREACHED() << "Unsupported bytes per sample encountered.";
+      // |dest| is interleaved, so zero out every channel's worth of frames.
+      memset(dest, 0, frames * channels() * bytes_per_sample);
+ return;
+ }
+}
+
+void AudioBus::CopyTo(AudioBus* dest) const {
+ CopyPartialFramesTo(0, frames(), 0, dest);
+}
+
+void AudioBus::CopyPartialFramesTo(int source_start_frame,
+ int frame_count,
+ int dest_start_frame,
+ AudioBus* dest) const {
+ CHECK_EQ(channels(), dest->channels());
+ CHECK_LE(source_start_frame + frame_count, frames());
+ CHECK_LE(dest_start_frame + frame_count, dest->frames());
+
+ // Since we don't know if the other AudioBus is wrapped or not (and we don't
+ // want to care), just copy using the public channel() accessors.
+ for (int i = 0; i < channels(); ++i) {
+ memcpy(dest->channel(i) + dest_start_frame,
+ channel(i) + source_start_frame,
+ sizeof(*channel(i)) * frame_count);
+ }
+}
+
+void AudioBus::Scale(float volume) {
+ if (volume > 0 && volume != 1) {
+ for (int i = 0; i < channels(); ++i)
+ vector_math::FMUL(channel(i), volume, frames(), channel(i));
+ } else if (volume == 0) {
+ Zero();
+ }
+}
+
+} // namespace media
diff --git a/chromium/media/base/audio_bus.h b/chromium/media/base/audio_bus.h
new file mode 100644
index 00000000000..dbb49ca57fc
--- /dev/null
+++ b/chromium/media/base/audio_bus.h
@@ -0,0 +1,135 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_AUDIO_BUS_H_
+#define MEDIA_BASE_AUDIO_BUS_H_
+
+#include <vector>
+
+#include "base/memory/aligned_memory.h"
+#include "base/memory/scoped_ptr.h"
+#include "media/base/media_export.h"
+
+namespace media {
+class AudioParameters;
+
+// Scoped container for "busing" audio channel data around. Each channel is
+// stored in planar format and guaranteed to be aligned by kChannelAlignment.
+// AudioBus objects can be created normally or via wrapping. Normally, AudioBus
+// will dice up a contiguous memory block for channel data. When wrapped,
+// AudioBus instead routes requests for channel data to the wrapped object.
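+//
+// For example, a stereo AudioBus created normally stores all of channel 0's
+// frames in one contiguous, 16-byte-aligned float array, followed by channel
+// 1's, rather than interleaving samples frame by frame.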
+class MEDIA_EXPORT AudioBus {
+ public:
+ // Guaranteed alignment of each channel's data; use 16-byte alignment for easy
+ // SSE optimizations.
+ enum { kChannelAlignment = 16 };
+
+  // Creates a new AudioBus and allocates |channels| channels of length
+  // |frames|. The AudioParameters overload uses channels() and
+  // frames_per_buffer() from |params|.
+ static scoped_ptr<AudioBus> Create(int channels, int frames);
+ static scoped_ptr<AudioBus> Create(const AudioParameters& params);
+
+ // Creates a new AudioBus with the given number of channels, but zero length.
+ // It's expected to be used with SetChannelData() and set_frames() to
+ // wrap externally allocated memory.
+ static scoped_ptr<AudioBus> CreateWrapper(int channels);
+
+ // Creates a new AudioBus from an existing channel vector. Does not transfer
+ // ownership of |channel_data| to AudioBus; i.e., |channel_data| must outlive
+ // the returned AudioBus. Each channel must be aligned by kChannelAlignment.
+ static scoped_ptr<AudioBus> WrapVector(
+ int frames, const std::vector<float*>& channel_data);
+
+ // Creates a new AudioBus by wrapping an existing block of memory. Block must
+ // be at least CalculateMemorySize() bytes in size. |data| must outlive the
+ // returned AudioBus. |data| must be aligned by kChannelAlignment.
+ static scoped_ptr<AudioBus> WrapMemory(int channels, int frames, void* data);
+ static scoped_ptr<AudioBus> WrapMemory(const AudioParameters& params,
+ void* data);
+ // Returns the required memory size to use the WrapMemory() method.
+ static int CalculateMemorySize(const AudioParameters& params);
+
+ // Calculates the required size for an AudioBus given the number of channels
+ // and frames.
+ static int CalculateMemorySize(int channels, int frames);
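+
+  // A minimal WrapMemory() usage sketch (mirroring the unit tests; the
+  // allocation details are illustrative):
+  //   int size = AudioBus::CalculateMemorySize(params);
+  //   float* data = static_cast<float*>(
+  //       base::AlignedAlloc(size, AudioBus::kChannelAlignment));
+  //   scoped_ptr<AudioBus> bus = AudioBus::WrapMemory(params, data);
+  //   ...use |bus|, then base::AlignedFree(data) once |bus| is destroyed.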
+
+ // Helper methods for converting an AudioBus from and to interleaved integer
+ // data. Expects interleaving to be [ch0, ch1, ..., chN, ch0, ch1, ...] with
+ // |bytes_per_sample| per value. Values are scaled and bias corrected during
+ // conversion. ToInterleaved() will also clip values to format range.
+ // Handles uint8, int16, and int32 currently. FromInterleaved() will zero out
+ // any unfilled frames when |frames| is less than frames().
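+  // For example, stereo int16 input is laid out as [L0, R0, L1, R1, ...];
+  // FromInterleaved() fills channel(0) with the left samples and channel(1)
+  // with the right samples, scaled into float range.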
+ void FromInterleaved(const void* source, int frames, int bytes_per_sample);
+ void ToInterleaved(int frames, int bytes_per_sample, void* dest) const;
+ void ToInterleavedPartial(int start_frame, int frames, int bytes_per_sample,
+ void* dest) const;
+
+ // Similar to FromInterleaved() above, but meant for streaming sources. Does
+  // not zero out remaining frames; the caller is responsible for doing so via
+ // ZeroFramesPartial(). Frames are deinterleaved from the start of |source|
+ // to channel(x)[start_frame].
+ void FromInterleavedPartial(const void* source, int start_frame, int frames,
+ int bytes_per_sample);
+
+ // Helper method for copying channel data from one AudioBus to another. Both
+  // AudioBus objects must have the same frames() and channels().
+ void CopyTo(AudioBus* dest) const;
+
+ // Helper method to copy frames from one AudioBus to another. Both AudioBus
+ // objects must have the same number of channels(). |source_start_frame| is
+ // the starting offset. |dest_start_frame| is the starting offset in |dest|.
+ // |frame_count| is the number of frames to copy.
+ void CopyPartialFramesTo(int source_start_frame,
+ int frame_count,
+ int dest_start_frame,
+ AudioBus* dest) const;
+
+ // Returns a raw pointer to the requested channel. Pointer is guaranteed to
+  // have a 16-byte alignment. Warning: Do not assume the channel data holds
+  // sane values; it may contain inf, NaN, or values outside [-1.0, 1.0].
+ float* channel(int channel) { return channel_data_[channel]; }
+ const float* channel(int channel) const { return channel_data_[channel]; }
+ void SetChannelData(int channel, float* data);
+
+ int channels() const { return static_cast<int>(channel_data_.size()); }
+ int frames() const { return frames_; }
+ void set_frames(int frames);
+
+ // Helper method for zeroing out all channels of audio data.
+ void Zero();
+ void ZeroFrames(int frames);
+ void ZeroFramesPartial(int start_frame, int frames);
+
+ // Scale internal channel values by |volume| >= 0. If an invalid value
+ // is provided, no adjustment is done.
+ void Scale(float volume);
+
+ private:
+ friend struct base::DefaultDeleter<AudioBus>;
+ ~AudioBus();
+
+ AudioBus(int channels, int frames);
+ AudioBus(int channels, int frames, float* data);
+ AudioBus(int frames, const std::vector<float*>& channel_data);
+ explicit AudioBus(int channels);
+
+ // Helper method for building |channel_data_| from a block of memory. |data|
+  // must be at least CalculateMemorySize() bytes in size.
+  void BuildChannelData(int channels, int aligned_frames, float* data);
+
+ // Contiguous block of channel memory.
+ scoped_ptr_malloc<float, base::ScopedPtrAlignedFree> data_;
+
+ std::vector<float*> channel_data_;
+ int frames_;
+
+ // Protect SetChannelData() and set_frames() for use by CreateWrapper().
+ bool can_set_channel_data_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioBus);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_AUDIO_BUS_H_
diff --git a/chromium/media/base/audio_bus_unittest.cc b/chromium/media/base/audio_bus_unittest.cc
new file mode 100644
index 00000000000..a82090bd8e8
--- /dev/null
+++ b/chromium/media/base/audio_bus_unittest.cc
@@ -0,0 +1,472 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/stringprintf.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+#include "media/audio/audio_parameters.h"
+#include "media/base/audio_bus.h"
+#include "media/base/channel_layout.h"
+#include "media/base/fake_audio_render_callback.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+static const int kChannels = 6;
+static const ChannelLayout kChannelLayout = CHANNEL_LAYOUT_5_1;
+// Use a buffer size which is intentionally not a multiple of kChannelAlignment.
+static const int kFrameCount = media::AudioBus::kChannelAlignment * 32 - 1;
+static const int kSampleRate = 48000;
+
+class AudioBusTest : public testing::Test {
+ public:
+ AudioBusTest() {}
+ virtual ~AudioBusTest() {
+ for (size_t i = 0; i < data_.size(); ++i)
+ base::AlignedFree(data_[i]);
+ }
+
+  // Validate parameters returned by AudioBus vs. the constructed parameters.
+ void VerifyParams(AudioBus* bus) {
+ EXPECT_EQ(kChannels, bus->channels());
+ EXPECT_EQ(kFrameCount, bus->frames());
+ }
+
+ void VerifyValue(const float data[], int size, float value) {
+ for (int i = 0; i < size; ++i)
+ ASSERT_FLOAT_EQ(value, data[i]) << "i=" << i;
+ }
+
+ // Verify values for each channel in |result| are within |epsilon| of
+  // |expected|. If |epsilon| exactly equals 0, the ASSERT_FLOAT_EQ macro is
+  // used for the comparison.
+ void VerifyBusWithEpsilon(const AudioBus* result, const AudioBus* expected,
+ float epsilon) {
+ ASSERT_EQ(expected->channels(), result->channels());
+ ASSERT_EQ(expected->frames(), result->frames());
+ for (int ch = 0; ch < result->channels(); ++ch) {
+ for (int i = 0; i < result->frames(); ++i) {
+ SCOPED_TRACE(base::StringPrintf("ch=%d, i=%d", ch, i));
+ if (epsilon == 0) {
+ ASSERT_FLOAT_EQ(expected->channel(ch)[i], result->channel(ch)[i]);
+ } else {
+ ASSERT_NEAR(expected->channel(ch)[i], result->channel(ch)[i],
+ epsilon);
+ }
+ }
+ }
+ }
+
+ // Verify values for each channel in |result| against |expected|.
+ void VerifyBus(const AudioBus* result, const AudioBus* expected) {
+ VerifyBusWithEpsilon(result, expected, 0);
+ }
+
+  // Read and write to the full extent of the allocated channel data. Also test
+  // that the Zero() method works as advertised and that the data is 16-byte
+  // aligned as advertised (see kChannelAlignment in audio_bus.h).
+ void VerifyChannelData(AudioBus* bus) {
+ for (int i = 0; i < bus->channels(); ++i) {
+ ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(
+ bus->channel(i)) & (AudioBus::kChannelAlignment - 1));
+ std::fill(bus->channel(i), bus->channel(i) + bus->frames(), i);
+ }
+
+ for (int i = 0; i < bus->channels(); ++i)
+ VerifyValue(bus->channel(i), bus->frames(), i);
+
+ bus->Zero();
+ for (int i = 0; i < bus->channels(); ++i)
+ VerifyValue(bus->channel(i), bus->frames(), 0);
+ }
+
+ // Verify copying to and from |bus1| and |bus2|.
+ void CopyTest(AudioBus* bus1, AudioBus* bus2) {
+ // Fill |bus1| with dummy data.
+ for (int i = 0; i < bus1->channels(); ++i)
+ std::fill(bus1->channel(i), bus1->channel(i) + bus1->frames(), i);
+
+ // Verify copy from |bus1| to |bus2|.
+ bus2->Zero();
+ bus1->CopyTo(bus2);
+ VerifyBus(bus1, bus2);
+
+ // Verify copy from |bus2| to |bus1|.
+ bus1->Zero();
+ bus2->CopyTo(bus1);
+ VerifyBus(bus2, bus1);
+ }
+
+ protected:
+ std::vector<float*> data_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioBusTest);
+};
+
+// Verify basic Create(...) method works as advertised.
+TEST_F(AudioBusTest, Create) {
+ scoped_ptr<AudioBus> bus = AudioBus::Create(kChannels, kFrameCount);
+ VerifyParams(bus.get());
+ VerifyChannelData(bus.get());
+}
+
+// Verify Create(...) using AudioParameters works as advertised.
+TEST_F(AudioBusTest, CreateUsingAudioParameters) {
+ scoped_ptr<AudioBus> bus = AudioBus::Create(AudioParameters(
+ AudioParameters::AUDIO_PCM_LINEAR, kChannelLayout, kSampleRate, 32,
+ kFrameCount));
+ VerifyParams(bus.get());
+ VerifyChannelData(bus.get());
+}
+
+// Verify an AudioBus created via wrapping a vector works as advertised.
+TEST_F(AudioBusTest, WrapVector) {
+ data_.reserve(kChannels);
+ for (int i = 0; i < kChannels; ++i) {
+ data_.push_back(static_cast<float*>(base::AlignedAlloc(
+ sizeof(*data_[i]) * kFrameCount, AudioBus::kChannelAlignment)));
+ }
+
+ scoped_ptr<AudioBus> bus = AudioBus::WrapVector(kFrameCount, data_);
+ VerifyParams(bus.get());
+ VerifyChannelData(bus.get());
+}
+
+// Verify an AudioBus created via wrapping a memory block works as advertised.
+TEST_F(AudioBusTest, WrapMemory) {
+ AudioParameters params(
+ AudioParameters::AUDIO_PCM_LINEAR, kChannelLayout, kSampleRate, 32,
+ kFrameCount);
+ int data_size = AudioBus::CalculateMemorySize(params);
+ scoped_ptr_malloc<float, base::ScopedPtrAlignedFree> data(static_cast<float*>(
+ base::AlignedAlloc(data_size, AudioBus::kChannelAlignment)));
+
+ // Fill the memory with a test value we can check for after wrapping.
+ static const float kTestValue = 3;
+ std::fill(
+ data.get(), data.get() + data_size / sizeof(*data.get()), kTestValue);
+
+ scoped_ptr<AudioBus> bus = AudioBus::WrapMemory(params, data.get());
+ // Verify the test value we filled prior to wrapping.
+ for (int i = 0; i < bus->channels(); ++i)
+ VerifyValue(bus->channel(i), bus->frames(), kTestValue);
+ VerifyParams(bus.get());
+ VerifyChannelData(bus.get());
+
+ // Verify the channel vectors lie within the provided memory block.
+ EXPECT_GE(bus->channel(0), data.get());
+ EXPECT_LT(bus->channel(bus->channels() - 1) + bus->frames(),
+ data.get() + data_size / sizeof(*data.get()));
+}
+
+// Simulate a shared memory transfer and verify results.
+TEST_F(AudioBusTest, CopyTo) {
+ // Create one bus with AudioParameters and the other through direct values to
+ // test for parity between the Create() functions.
+ AudioParameters params(
+ AudioParameters::AUDIO_PCM_LINEAR, kChannelLayout, kSampleRate, 32,
+ kFrameCount);
+ scoped_ptr<AudioBus> bus1 = AudioBus::Create(kChannels, kFrameCount);
+ scoped_ptr<AudioBus> bus2 = AudioBus::Create(params);
+
+ {
+ SCOPED_TRACE("Created");
+ CopyTest(bus1.get(), bus2.get());
+ }
+ {
+ SCOPED_TRACE("Wrapped Vector");
+ // Try a copy to an AudioBus wrapping a vector.
+ data_.reserve(kChannels);
+ for (int i = 0; i < kChannels; ++i) {
+ data_.push_back(static_cast<float*>(base::AlignedAlloc(
+ sizeof(*data_[i]) * kFrameCount, AudioBus::kChannelAlignment)));
+ }
+
+ bus2 = AudioBus::WrapVector(kFrameCount, data_);
+ CopyTest(bus1.get(), bus2.get());
+ }
+ {
+ SCOPED_TRACE("Wrapped Memory");
+ // Try a copy to an AudioBus wrapping a memory block.
+ scoped_ptr_malloc<float, base::ScopedPtrAlignedFree> data(
+ static_cast<float*>(base::AlignedAlloc(
+ AudioBus::CalculateMemorySize(params),
+ AudioBus::kChannelAlignment)));
+
+ bus2 = AudioBus::WrapMemory(params, data.get());
+ CopyTest(bus1.get(), bus2.get());
+ }
+}
+
+// Verify Zero() and ZeroFrames(...) utility methods work as advertised.
+TEST_F(AudioBusTest, Zero) {
+ scoped_ptr<AudioBus> bus = AudioBus::Create(kChannels, kFrameCount);
+
+ // Fill the bus with dummy data.
+ for (int i = 0; i < bus->channels(); ++i)
+ std::fill(bus->channel(i), bus->channel(i) + bus->frames(), i + 1);
+
+  // Zero the first half of the frames of each channel.
+ bus->ZeroFrames(kFrameCount / 2);
+ for (int i = 0; i < bus->channels(); ++i) {
+ SCOPED_TRACE("First Half Zero");
+ VerifyValue(bus->channel(i), kFrameCount / 2, 0);
+ VerifyValue(bus->channel(i) + kFrameCount / 2,
+ kFrameCount - kFrameCount / 2, i + 1);
+ }
+
+ // Fill the bus with dummy data.
+ for (int i = 0; i < bus->channels(); ++i)
+ std::fill(bus->channel(i), bus->channel(i) + bus->frames(), i + 1);
+
+ // Zero the last half of the frames.
+ bus->ZeroFramesPartial(kFrameCount / 2, kFrameCount - kFrameCount / 2);
+ for (int i = 0; i < bus->channels(); ++i) {
+ SCOPED_TRACE("Last Half Zero");
+ VerifyValue(bus->channel(i) + kFrameCount / 2,
+ kFrameCount - kFrameCount / 2, 0);
+ VerifyValue(bus->channel(i), kFrameCount / 2, i + 1);
+ }
+
+ // Fill the bus with dummy data.
+ for (int i = 0; i < bus->channels(); ++i)
+ std::fill(bus->channel(i), bus->channel(i) + bus->frames(), i + 1);
+
+ // Zero all the frames of each channel.
+ bus->Zero();
+ for (int i = 0; i < bus->channels(); ++i) {
+ SCOPED_TRACE("All Zero");
+ VerifyValue(bus->channel(i), bus->frames(), 0);
+ }
+}
+
+// Each test vector represents two channels of data in the following arbitrary
+// layout: <min, zero, max, min, max / 2, min / 2, zero, max, zero, zero>.
+static const int kTestVectorSize = 10;
+static const uint8 kTestVectorUint8[kTestVectorSize] = {
+ 0, -kint8min, kuint8max, 0, kint8max / 2 + 128, kint8min / 2 + 128,
+ -kint8min, kuint8max, -kint8min, -kint8min };
+static const int16 kTestVectorInt16[kTestVectorSize] = {
+ kint16min, 0, kint16max, kint16min, kint16max / 2, kint16min / 2,
+ 0, kint16max, 0, 0 };
+static const int32 kTestVectorInt32[kTestVectorSize] = {
+ kint32min, 0, kint32max, kint32min, kint32max / 2, kint32min / 2,
+ 0, kint32max, 0, 0 };
+
+// Expected results.
+static const int kTestVectorFrames = kTestVectorSize / 2;
+static const float kTestVectorResult[][kTestVectorFrames] = {
+ { -1, 1, 0.5, 0, 0 }, { 0, -1, -0.5, 1, 0 }};
+static const int kTestVectorChannels = arraysize(kTestVectorResult);
+
+// Verify FromInterleaved() deinterleaves audio in supported formats correctly.
+TEST_F(AudioBusTest, FromInterleaved) {
+ scoped_ptr<AudioBus> bus = AudioBus::Create(
+ kTestVectorChannels, kTestVectorFrames);
+ scoped_ptr<AudioBus> expected = AudioBus::Create(
+ kTestVectorChannels, kTestVectorFrames);
+ for (int ch = 0; ch < kTestVectorChannels; ++ch) {
+ memcpy(expected->channel(ch), kTestVectorResult[ch],
+ kTestVectorFrames * sizeof(*expected->channel(ch)));
+ }
+ {
+ SCOPED_TRACE("uint8");
+ bus->Zero();
+ bus->FromInterleaved(
+ kTestVectorUint8, kTestVectorFrames, sizeof(*kTestVectorUint8));
+ // Biased uint8 calculations have poor precision, so the epsilon here is
+ // slightly more permissive than int16 and int32 calculations.
+ VerifyBusWithEpsilon(bus.get(), expected.get(), 1.0f / (kuint8max - 1));
+ }
+ {
+ SCOPED_TRACE("int16");
+ bus->Zero();
+ bus->FromInterleaved(
+ kTestVectorInt16, kTestVectorFrames, sizeof(*kTestVectorInt16));
+ VerifyBusWithEpsilon(bus.get(), expected.get(), 1.0f / (kuint16max + 1.0f));
+ }
+ {
+ SCOPED_TRACE("int32");
+ bus->Zero();
+ bus->FromInterleaved(
+ kTestVectorInt32, kTestVectorFrames, sizeof(*kTestVectorInt32));
+ VerifyBusWithEpsilon(bus.get(), expected.get(), 1.0f / (kuint32max + 1.0f));
+ }
+}
+
+// Verify FromInterleavedPartial() deinterleaves audio correctly.
+TEST_F(AudioBusTest, FromInterleavedPartial) {
+ // Only deinterleave the middle two frames in each channel.
+ static const int kPartialStart = 1;
+ static const int kPartialFrames = 2;
+ ASSERT_LE(kPartialStart + kPartialFrames, kTestVectorFrames);
+
+ scoped_ptr<AudioBus> bus = AudioBus::Create(
+ kTestVectorChannels, kTestVectorFrames);
+ scoped_ptr<AudioBus> expected = AudioBus::Create(
+ kTestVectorChannels, kTestVectorFrames);
+ expected->Zero();
+ for (int ch = 0; ch < kTestVectorChannels; ++ch) {
+ memcpy(expected->channel(ch) + kPartialStart,
+ kTestVectorResult[ch] + kPartialStart,
+ kPartialFrames * sizeof(*expected->channel(ch)));
+ }
+
+ bus->Zero();
+ bus->FromInterleavedPartial(
+ kTestVectorInt32 + kPartialStart * bus->channels(), kPartialStart,
+ kPartialFrames, sizeof(*kTestVectorInt32));
+ VerifyBus(bus.get(), expected.get());
+}
+
+// Verify ToInterleaved() interleaves audio in supported formats correctly.
+TEST_F(AudioBusTest, ToInterleaved) {
+ scoped_ptr<AudioBus> bus = AudioBus::Create(
+ kTestVectorChannels, kTestVectorFrames);
+ // Fill the bus with our test vector.
+ for (int ch = 0; ch < bus->channels(); ++ch) {
+ memcpy(bus->channel(ch), kTestVectorResult[ch],
+ kTestVectorFrames * sizeof(*bus->channel(ch)));
+ }
+ {
+ SCOPED_TRACE("uint8");
+ uint8 test_array[arraysize(kTestVectorUint8)];
+ bus->ToInterleaved(bus->frames(), sizeof(*kTestVectorUint8), test_array);
+ ASSERT_EQ(memcmp(
+ test_array, kTestVectorUint8, sizeof(kTestVectorUint8)), 0);
+ }
+ {
+ SCOPED_TRACE("int16");
+ int16 test_array[arraysize(kTestVectorInt16)];
+ bus->ToInterleaved(bus->frames(), sizeof(*kTestVectorInt16), test_array);
+ ASSERT_EQ(memcmp(
+ test_array, kTestVectorInt16, sizeof(kTestVectorInt16)), 0);
+ }
+ {
+ SCOPED_TRACE("int32");
+ int32 test_array[arraysize(kTestVectorInt32)];
+ bus->ToInterleaved(bus->frames(), sizeof(*kTestVectorInt32), test_array);
+
+    // Some compilers get better precision than others on the half-max test, so
+    // let the test pass with an off-by-one check on the half-max value.
+ int32 fixed_test_array[arraysize(kTestVectorInt32)];
+ memcpy(fixed_test_array, kTestVectorInt32, sizeof(kTestVectorInt32));
+ ASSERT_EQ(fixed_test_array[4], kint32max / 2);
+ fixed_test_array[4]++;
+
+ ASSERT_TRUE(
+ memcmp(test_array, kTestVectorInt32, sizeof(kTestVectorInt32)) == 0 ||
+ memcmp(test_array, fixed_test_array, sizeof(fixed_test_array)) == 0);
+ }
+}
+
+// Verify ToInterleavedPartial() interleaves audio correctly.
+TEST_F(AudioBusTest, ToInterleavedPartial) {
+ // Only interleave the middle two frames in each channel.
+ static const int kPartialStart = 1;
+ static const int kPartialFrames = 2;
+ ASSERT_LE(kPartialStart + kPartialFrames, kTestVectorFrames);
+
+ scoped_ptr<AudioBus> expected = AudioBus::Create(
+ kTestVectorChannels, kTestVectorFrames);
+ for (int ch = 0; ch < kTestVectorChannels; ++ch) {
+ memcpy(expected->channel(ch), kTestVectorResult[ch],
+ kTestVectorFrames * sizeof(*expected->channel(ch)));
+ }
+
+ int16 test_array[arraysize(kTestVectorInt16)];
+ expected->ToInterleavedPartial(
+ kPartialStart, kPartialFrames, sizeof(*kTestVectorInt16), test_array);
+ ASSERT_EQ(memcmp(
+ test_array, kTestVectorInt16 + kPartialStart * kTestVectorChannels,
+ kPartialFrames * sizeof(*kTestVectorInt16) * kTestVectorChannels), 0);
+}
+
+TEST_F(AudioBusTest, Scale) {
+ scoped_ptr<AudioBus> bus = AudioBus::Create(kChannels, kFrameCount);
+
+ // Fill the bus with dummy data.
+ static const float kFillValue = 1;
+ for (int i = 0; i < bus->channels(); ++i)
+ std::fill(bus->channel(i), bus->channel(i) + bus->frames(), kFillValue);
+
+  // Adjust by an invalid volume and ensure the data is unchanged.
+ bus->Scale(-1);
+ for (int i = 0; i < bus->channels(); ++i) {
+ SCOPED_TRACE("Invalid Scale");
+ VerifyValue(bus->channel(i), bus->frames(), kFillValue);
+ }
+
+ // Verify correct volume adjustment.
+ static const float kVolume = 0.5;
+ bus->Scale(kVolume);
+ for (int i = 0; i < bus->channels(); ++i) {
+ SCOPED_TRACE("Half Scale");
+ VerifyValue(bus->channel(i), bus->frames(), kFillValue * kVolume);
+ }
+
+ // Verify zero volume case.
+ bus->Scale(0);
+ for (int i = 0; i < bus->channels(); ++i) {
+ SCOPED_TRACE("Zero Scale");
+ VerifyValue(bus->channel(i), bus->frames(), 0);
+ }
+}
+
+// Benchmark the FromInterleaved() and ToInterleaved() methods.
+TEST_F(AudioBusTest, DISABLED_InterleaveBench) {
+ scoped_ptr<AudioBus> bus = AudioBus::Create(2, 48000 * 120);
+ const int frame_size = bus->frames() * bus->channels();
+ FakeAudioRenderCallback callback(0.2);
+ callback.Render(bus.get(), 0);
+ {
+ SCOPED_TRACE("uint8");
+    // Use the array form of scoped_ptr so delete[] is used for the array.
+    scoped_ptr<uint8[]> interleaved(new uint8[frame_size]);
+    const int byte_size = sizeof(interleaved[0]);
+
+ base::TimeTicks start = base::TimeTicks::HighResNow();
+ bus->ToInterleaved(bus->frames(), byte_size, interleaved.get());
+ double total_time_ms =
+ (base::TimeTicks::HighResNow() - start).InMillisecondsF();
+ printf("ToInterleaved uint8 took %.2fms.\n", total_time_ms);
+
+ start = base::TimeTicks::HighResNow();
+ bus->FromInterleaved(interleaved.get(), bus->frames(), byte_size);
+ total_time_ms = (base::TimeTicks::HighResNow() - start).InMillisecondsF();
+ printf("FromInterleaved uint8 took %.2fms.\n", total_time_ms);
+ }
+ {
+ SCOPED_TRACE("int16");
+    scoped_ptr<int16[]> interleaved(new int16[frame_size]);
+    const int byte_size = sizeof(interleaved[0]);
+
+ base::TimeTicks start = base::TimeTicks::HighResNow();
+ bus->ToInterleaved(bus->frames(), byte_size, interleaved.get());
+ double total_time_ms =
+ (base::TimeTicks::HighResNow() - start).InMillisecondsF();
+ printf("ToInterleaved int16 took %.2fms.\n", total_time_ms);
+
+ start = base::TimeTicks::HighResNow();
+ bus->FromInterleaved(interleaved.get(), bus->frames(), byte_size);
+ total_time_ms = (base::TimeTicks::HighResNow() - start).InMillisecondsF();
+ printf("FromInterleaved int16 took %.2fms.\n", total_time_ms);
+ }
+ {
+ SCOPED_TRACE("int32");
+    scoped_ptr<int32[]> interleaved(new int32[frame_size]);
+    const int byte_size = sizeof(interleaved[0]);
+
+ base::TimeTicks start = base::TimeTicks::HighResNow();
+ bus->ToInterleaved(bus->frames(), byte_size, interleaved.get());
+ double total_time_ms =
+ (base::TimeTicks::HighResNow() - start).InMillisecondsF();
+ printf("ToInterleaved int32 took %.2fms.\n", total_time_ms);
+
+ start = base::TimeTicks::HighResNow();
+ bus->FromInterleaved(interleaved.get(), bus->frames(), byte_size);
+ total_time_ms = (base::TimeTicks::HighResNow() - start).InMillisecondsF();
+ printf("FromInterleaved int32 took %.2fms.\n", total_time_ms);
+ }
+}
+
+} // namespace media
diff --git a/chromium/media/base/audio_capturer_source.h b/chromium/media/base/audio_capturer_source.h
new file mode 100644
index 00000000000..deae5e22dc1
--- /dev/null
+++ b/chromium/media/base/audio_capturer_source.h
@@ -0,0 +1,68 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_AUDIO_CAPTURER_SOURCE_H_
+#define MEDIA_BASE_AUDIO_CAPTURER_SOURCE_H_
+
+#include <string>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/memory/ref_counted.h"
+#include "media/audio/audio_parameters.h"
+#include "media/base/audio_bus.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+// AudioCapturerSource is an interface representing the source for
+// captured audio. An implementation will periodically call Capture() on a
+// callback object.
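+//
+// A minimal client sketch (type and variable names are illustrative only):
+//   class PcmReceiver : public AudioCapturerSource::CaptureCallback {
+//    public:
+//     virtual void Capture(AudioBus* audio_source,
+//                          int audio_delay_milliseconds,
+//                          double volume) OVERRIDE { /* consume audio */ }
+//     virtual void OnCaptureError() OVERRIDE { /* handle the error */ }
+//   };
+//   ...
+//   source->Initialize(params, &receiver, session_id);
+//   source->Start();  // Capture() calls begin arriving.
+//   source->Stop();   // Synchronous; no callbacks after this returns.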
+class AudioCapturerSource
+ : public base::RefCountedThreadSafe<media::AudioCapturerSource> {
+ public:
+ class CaptureCallback {
+ public:
+ // Callback to deliver the captured data from the OS.
+ virtual void Capture(AudioBus* audio_source,
+ int audio_delay_milliseconds,
+ double volume) = 0;
+
+ // Signals an error has occurred.
+ virtual void OnCaptureError() = 0;
+
+ protected:
+ virtual ~CaptureCallback() {}
+ };
+
+ // Sets information about the audio stream format and the device
+ // to be used. It must be called before any of the other methods.
+ // The |session_id| is used by the browser to identify which input device to
+ // be used. For clients who do not care about device permission and device
+ // selection, pass |session_id| using
+ // AudioInputDeviceManager::kFakeOpenSessionId.
+ virtual void Initialize(const AudioParameters& params,
+ CaptureCallback* callback,
+ int session_id) = 0;
+
+ // Starts the audio recording.
+ virtual void Start() = 0;
+
+ // Stops the audio recording. This API is synchronous, and no more data
+ // callback will be passed to the client after it is being called.
+ virtual void Stop() = 0;
+
+ // Sets the capture volume, with range [0.0, 1.0] inclusive.
+ virtual void SetVolume(double volume) = 0;
+
+ // Enables or disables the WebRtc AGC control.
+ virtual void SetAutomaticGainControl(bool enable) = 0;
+
+ protected:
+ friend class base::RefCountedThreadSafe<AudioCapturerSource>;
+ virtual ~AudioCapturerSource() {}
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_AUDIO_CAPTURER_SOURCE_H_
diff --git a/chromium/media/base/audio_converter.cc b/chromium/media/base/audio_converter.cc
new file mode 100644
index 00000000000..ac82e314ba3
--- /dev/null
+++ b/chromium/media/base/audio_converter.cc
@@ -0,0 +1,248 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// AudioConverter implementation. Uses MultiChannelResampler for resampling
+// audio, ChannelMixer for channel mixing, and AudioPullFifo for buffering.
+//
+// Delay estimates are provided to InputCallbacks based on the frame delay
+// information reported via the resampler and FIFO units.
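+//
+// Roughly, each Convert() call pulls data through the optional pieces in this
+// order (bracketed stages are skipped when unnecessary):
+//   InputCallbacks -> mix -> [downmix] -> [FIFO] -> [resample] -> [upmix]
+//   -> destination AudioBus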
+
+#include "media/base/audio_converter.h"
+
+#include <algorithm>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "media/base/audio_bus.h"
+#include "media/base/audio_pull_fifo.h"
+#include "media/base/channel_mixer.h"
+#include "media/base/multi_channel_resampler.h"
+#include "media/base/vector_math.h"
+
+namespace media {
+
+AudioConverter::AudioConverter(const AudioParameters& input_params,
+ const AudioParameters& output_params,
+ bool disable_fifo)
+ : downmix_early_(false),
+ resampler_frame_delay_(0),
+ input_channel_count_(input_params.channels()) {
+ CHECK(input_params.IsValid());
+ CHECK(output_params.IsValid());
+
+ // Handle different input and output channel layouts.
+ if (input_params.channel_layout() != output_params.channel_layout()) {
+ DVLOG(1) << "Remixing channel layout from " << input_params.channel_layout()
+ << " to " << output_params.channel_layout() << "; from "
+ << input_params.channels() << " channels to "
+ << output_params.channels() << " channels.";
+ channel_mixer_.reset(new ChannelMixer(input_params, output_params));
+
+ // Pare off data as early as we can for efficiency.
+ downmix_early_ = input_params.channels() > output_params.channels();
+ if (downmix_early_) {
+ DVLOG(1) << "Remixing channel layout prior to resampling.";
+ // |unmixed_audio_| will be allocated on the fly.
+ } else {
+ // Instead, if we're not downmixing early we need a temporary AudioBus
+ // which matches the input channel count but uses the output frame size
+ // since we'll mix into the AudioBus from the output stream.
+ unmixed_audio_ = AudioBus::Create(
+ input_params.channels(), output_params.frames_per_buffer());
+ }
+ }
+
+ // Only resample if necessary since it's expensive.
+ if (input_params.sample_rate() != output_params.sample_rate()) {
+ DVLOG(1) << "Resampling from " << input_params.sample_rate() << " to "
+ << output_params.sample_rate();
+ const double io_sample_rate_ratio = input_params.sample_rate() /
+ static_cast<double>(output_params.sample_rate());
+ const int request_size = disable_fifo ? SincResampler::kDefaultRequestSize :
+ input_params.frames_per_buffer();
+ resampler_.reset(new MultiChannelResampler(
+ downmix_early_ ? output_params.channels() :
+ input_params.channels(),
+ io_sample_rate_ratio, request_size, base::Bind(
+ &AudioConverter::ProvideInput, base::Unretained(this))));
+ }
+
+ input_frame_duration_ = base::TimeDelta::FromMicroseconds(
+ base::Time::kMicrosecondsPerSecond /
+ static_cast<double>(input_params.sample_rate()));
+ output_frame_duration_ = base::TimeDelta::FromMicroseconds(
+ base::Time::kMicrosecondsPerSecond /
+ static_cast<double>(output_params.sample_rate()));
+
+ // The resampler can be configured to work with a specific request size, so a
+ // FIFO is not necessary when resampling.
+ if (disable_fifo || resampler_)
+ return;
+
+ // Since the output device may want a different buffer size than the caller
+ // asked for, we need to use a FIFO to ensure that both sides read in chunk
+ // sizes they're configured for.
+ if (input_params.frames_per_buffer() != output_params.frames_per_buffer()) {
+ DVLOG(1) << "Rebuffering from " << input_params.frames_per_buffer()
+ << " to " << output_params.frames_per_buffer();
+ audio_fifo_.reset(new AudioPullFifo(
+ downmix_early_ ? output_params.channels() :
+ input_params.channels(),
+ input_params.frames_per_buffer(), base::Bind(
+ &AudioConverter::SourceCallback,
+ base::Unretained(this))));
+ }
+}
+
+AudioConverter::~AudioConverter() {}
+
+void AudioConverter::AddInput(InputCallback* input) {
+ // TODO(dalecurtis): Speculative CHECK for http://crbug.com/233026, should be
+ // converted to a DCHECK once resolved.
+ CHECK(std::find(transform_inputs_.begin(), transform_inputs_.end(), input) ==
+ transform_inputs_.end());
+ transform_inputs_.push_back(input);
+}
+
+void AudioConverter::RemoveInput(InputCallback* input) {
+ DCHECK(std::find(transform_inputs_.begin(), transform_inputs_.end(), input) !=
+ transform_inputs_.end());
+ transform_inputs_.remove(input);
+
+ if (transform_inputs_.empty())
+ Reset();
+}
+
+void AudioConverter::Reset() {
+ if (audio_fifo_)
+ audio_fifo_->Clear();
+ if (resampler_)
+ resampler_->Flush();
+}
+
+void AudioConverter::ConvertWithDelay(const base::TimeDelta& initial_delay,
+ AudioBus* dest) {
+ initial_delay_ = initial_delay;
+
+ if (transform_inputs_.empty()) {
+ dest->Zero();
+ return;
+ }
+
+ // Determine if channel mixing should be done and if it should be done before
+ // or after resampling. If it's possible to reduce the channel count prior to
+  // resampling we can save a lot of processing time. Conversely, we don't want
+  // to increase the channel count prior to resampling, for the same reason.
+ bool needs_mixing = channel_mixer_ && !downmix_early_;
+ AudioBus* temp_dest = needs_mixing ? unmixed_audio_.get() : dest;
+ DCHECK(temp_dest);
+
+ // Figure out which method to call based on whether we're resampling and
+ // rebuffering, just resampling, or just mixing. We want to avoid any extra
+ // steps when possible since we may be converting audio data in real time.
+ if (!resampler_ && !audio_fifo_) {
+ SourceCallback(0, temp_dest);
+ } else {
+ if (resampler_)
+ resampler_->Resample(temp_dest->frames(), temp_dest);
+ else
+ ProvideInput(0, temp_dest);
+ }
+
+ // Finally upmix the channels if we didn't do so earlier.
+ if (needs_mixing) {
+ DCHECK_EQ(temp_dest->frames(), dest->frames());
+ channel_mixer_->Transform(temp_dest, dest);
+ }
+}
+
+void AudioConverter::Convert(AudioBus* dest) {
+ ConvertWithDelay(base::TimeDelta::FromMilliseconds(0), dest);
+}
+
+void AudioConverter::SourceCallback(int fifo_frame_delay, AudioBus* dest) {
+ bool needs_downmix = channel_mixer_ && downmix_early_;
+
+ if (!mixer_input_audio_bus_ ||
+ mixer_input_audio_bus_->frames() != dest->frames()) {
+ mixer_input_audio_bus_ =
+ AudioBus::Create(input_channel_count_, dest->frames());
+ }
+
+ if (needs_downmix &&
+ (!unmixed_audio_ || unmixed_audio_->frames() != dest->frames())) {
+ // If we're downmixing early we need a temporary AudioBus which matches
+ // the the input channel count and input frame size since we're passing
+ // |unmixed_audio_| directly to the |source_callback_|.
+ unmixed_audio_ = AudioBus::Create(input_channel_count_, dest->frames());
+ }
+
+ AudioBus* temp_dest = needs_downmix ? unmixed_audio_.get() : dest;
+
+ // Sanity check our inputs.
+ DCHECK_EQ(temp_dest->frames(), mixer_input_audio_bus_->frames());
+ DCHECK_EQ(temp_dest->channels(), mixer_input_audio_bus_->channels());
+
+ // Calculate the buffer delay for this callback.
+ base::TimeDelta buffer_delay = initial_delay_;
+ if (resampler_) {
+ buffer_delay += base::TimeDelta::FromMicroseconds(
+ resampler_frame_delay_ * output_frame_duration_.InMicroseconds());
+ }
+ if (audio_fifo_) {
+ buffer_delay += base::TimeDelta::FromMicroseconds(
+ fifo_frame_delay * input_frame_duration_.InMicroseconds());
+ }
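+  // As a worked example of the delay math: with 48 kHz output, a
+  // |resampler_frame_delay_| of 512 frames adds 512 output-frame durations,
+  // i.e. roughly 10 ms, to the delay reported to each InputCallback.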
+
+  // Have each input render its data into an output buffer and mix the result.
+ for (InputCallbackSet::iterator it = transform_inputs_.begin();
+ it != transform_inputs_.end(); ++it) {
+ InputCallback* input = *it;
+
+ float volume = input->ProvideInput(
+ mixer_input_audio_bus_.get(), buffer_delay);
+
+ // Optimize the most common single input, full volume case.
+ if (it == transform_inputs_.begin()) {
+ if (volume == 1.0f) {
+ mixer_input_audio_bus_->CopyTo(temp_dest);
+ } else if (volume > 0) {
+ for (int i = 0; i < mixer_input_audio_bus_->channels(); ++i) {
+ vector_math::FMUL(
+ mixer_input_audio_bus_->channel(i), volume,
+ mixer_input_audio_bus_->frames(), temp_dest->channel(i));
+ }
+ } else {
+ // Zero |temp_dest| otherwise, so we're mixing into a clean buffer.
+ temp_dest->Zero();
+ }
+
+ continue;
+ }
+
+ // Volume adjust and mix each mixer input into |temp_dest| after rendering.
+ if (volume > 0) {
+ for (int i = 0; i < mixer_input_audio_bus_->channels(); ++i) {
+ vector_math::FMAC(
+ mixer_input_audio_bus_->channel(i), volume,
+ mixer_input_audio_bus_->frames(), temp_dest->channel(i));
+ }
+ }
+ }
+
+ if (needs_downmix) {
+ DCHECK_EQ(temp_dest->frames(), dest->frames());
+ channel_mixer_->Transform(temp_dest, dest);
+ }
+}
+
+void AudioConverter::ProvideInput(int resampler_frame_delay, AudioBus* dest) {
+ resampler_frame_delay_ = resampler_frame_delay;
+ if (audio_fifo_)
+ audio_fifo_->Consume(dest, dest->frames());
+ else
+ SourceCallback(0, dest);
+}
+
+} // namespace media
diff --git a/chromium/media/base/audio_converter.h b/chromium/media/base/audio_converter.h
new file mode 100644
index 00000000000..2e43ec880b5
--- /dev/null
+++ b/chromium/media/base/audio_converter.h
@@ -0,0 +1,138 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// AudioConverter is a complete mixing, resampling, buffering, and channel
+// mixing solution for converting data from one set of AudioParameters to
+// another.
+//
+// For efficiency, pieces are only invoked when necessary; i.e.,
+// - The resampler is only used if sample rates differ.
+// - The FIFO is only used if buffer sizes differ.
+// - The channel mixer is only used if channel layouts differ.
+//
+// Additionally, since resampling is the most expensive operation, input mixing
+// and channel down mixing are done prior to resampling. Likewise, channel up
+// mixing is performed after resampling.
+
+#ifndef MEDIA_BASE_AUDIO_CONVERTER_H_
+#define MEDIA_BASE_AUDIO_CONVERTER_H_
+
+#include <list>
+
+#include "base/callback.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/time/time.h"
+#include "media/audio/audio_parameters.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+class AudioBus;
+class AudioPullFifo;
+class ChannelMixer;
+class MultiChannelResampler;
+
+// Converts audio data between two AudioParameters formats. Sample usage:
+// AudioParameters input(...), output(...);
+// AudioConverter ac(input, output);
+// scoped_ptr<AudioBus> output_audio_bus = AudioBus::Create(output);
+// ac.AddInput(<AudioConverter::InputCallback* 1>);
+// ac.AddInput(<AudioConverter::InputCallback* 2>);
+// ac.Convert(output_audio_bus.get());
+//
+// Convert() will ask for input audio data from each InputCallback and convert
+// the data into the provided AudioBus.
+class MEDIA_EXPORT AudioConverter {
+ public:
+ // Interface for inputs into the converter. Each InputCallback is added or
+ // removed from Convert() processing via AddInput() and RemoveInput().
+ class MEDIA_EXPORT InputCallback {
+ public:
+ // Method for providing more data into the converter. Expects |audio_bus|
+ // to be completely filled with data upon return; zero padded if not enough
+ // frames are available to satisfy the request. The return value is the
+ // volume level of the provided audio data. If a volume level of zero is
+    // returned, no further processing will be done on the provided data; else
+ // the volume level will be used to scale the provided audio data.
+ virtual double ProvideInput(AudioBus* audio_bus,
+ base::TimeDelta buffer_delay) = 0;
+
+ protected:
+ virtual ~InputCallback() {}
+ };
+
+ // Constructs an AudioConverter for converting between the given input and
+ // output parameters. Specifying |disable_fifo| means all InputCallbacks are
+ // capable of handling arbitrary buffer size requests; i.e. one call might ask
+ // for 10 frames of data (indicated by the size of AudioBus provided) and the
+ // next might ask for 20. In synthetic testing, disabling the FIFO yields a
+ // ~20% speed up for common cases.
+ AudioConverter(const AudioParameters& input_params,
+ const AudioParameters& output_params,
+ bool disable_fifo);
+ ~AudioConverter();
+
+  // Converts audio from all inputs into |dest|, which must be sized for
+ // data matching the output AudioParameters provided during construction. If
+ // an |initial_delay| is specified, it will be propagated to each input.
+ void Convert(AudioBus* dest);
+ void ConvertWithDelay(const base::TimeDelta& initial_delay, AudioBus* dest);
+
+ // Adds or removes an input from the converter. RemoveInput() will call
+ // Reset() if no inputs remain after the specified input is removed.
+ void AddInput(InputCallback* input);
+ void RemoveInput(InputCallback* input);
+
+ // Flushes all buffered data.
+ void Reset();
+
+ private:
+ // Provides input to the MultiChannelResampler. Called by the resampler when
+ // more data is necessary.
+ void ProvideInput(int resampler_frame_delay, AudioBus* audio_bus);
+
+ // Provides input to the AudioPullFifo. Called by the fifo when more data is
+ // necessary.
+ void SourceCallback(int fifo_frame_delay, AudioBus* audio_bus);
+
+ // Set of inputs for Convert().
+ typedef std::list<InputCallback*> InputCallbackSet;
+ InputCallbackSet transform_inputs_;
+
+ // Used to buffer data between the client and the output device in cases where
+ // the client buffer size is not the same as the output device buffer size.
+ scoped_ptr<AudioPullFifo> audio_fifo_;
+
+ // Handles resampling.
+ scoped_ptr<MultiChannelResampler> resampler_;
+
+ // Handles channel transforms. |unmixed_audio_| is a temporary destination
+ // for audio data before it goes into the channel mixer.
+ scoped_ptr<ChannelMixer> channel_mixer_;
+ scoped_ptr<AudioBus> unmixed_audio_;
+
+ // Temporary AudioBus destination for mixing inputs.
+ scoped_ptr<AudioBus> mixer_input_audio_bus_;
+
+ // Since resampling is expensive, figure out if we should downmix channels
+ // before resampling.
+ bool downmix_early_;
+
+ // Used to calculate buffer delay information for InputCallbacks.
+ base::TimeDelta input_frame_duration_;
+ base::TimeDelta output_frame_duration_;
+ base::TimeDelta initial_delay_;
+ int resampler_frame_delay_;
+
+ // Number of channels of input audio data. Set during construction via the
+ // value from the input AudioParameters class. Preserved to recreate internal
+ // AudioBus structures on demand in response to varying frame size requests.
+ const int input_channel_count_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioConverter);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_AUDIO_CONVERTER_H_
diff --git a/chromium/media/base/audio_converter_unittest.cc b/chromium/media/base/audio_converter_unittest.cc
new file mode 100644
index 00000000000..d218ac882f4
--- /dev/null
+++ b/chromium/media/base/audio_converter_unittest.cc
@@ -0,0 +1,362 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// MSVC++ requires this to be set before any other includes to get M_PI.
+#define _USE_MATH_DEFINES
+
+#include <cmath>
+
+#include "base/command_line.h"
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/scoped_vector.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/time/time.h"
+#include "media/base/audio_converter.h"
+#include "media/base/fake_audio_render_callback.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+// Command line switch for runtime adjustment of benchmark iterations.
+static const char kBenchmarkIterations[] = "audio-converter-iterations";
+static const int kDefaultIterations = 10;
+
+// Parameters which control the many input case tests.
+static const int kConvertInputs = 8;
+static const int kConvertCycles = 3;
+
+// Parameters used for testing.
+static const int kBitsPerChannel = 32;
+static const ChannelLayout kChannelLayout = CHANNEL_LAYOUT_STEREO;
+static const int kHighLatencyBufferSize = 2048;
+static const int kLowLatencyBufferSize = 256;
+static const int kSampleRate = 48000;
+
+// Number of full sine wave cycles for each Render() call.
+static const int kSineCycles = 4;
+
+// Tuple of <input rate, output rate, output channel layout, epsilon>.
+typedef std::tr1::tuple<int, int, ChannelLayout, double> AudioConverterTestData;
+class AudioConverterTest
+ : public testing::TestWithParam<AudioConverterTestData> {
+ public:
+ AudioConverterTest()
+ : epsilon_(std::tr1::get<3>(GetParam())) {
+ // Create input and output parameters based on test parameters.
+ input_parameters_ = AudioParameters(
+ AudioParameters::AUDIO_PCM_LINEAR, kChannelLayout,
+ std::tr1::get<0>(GetParam()), kBitsPerChannel, kHighLatencyBufferSize);
+ output_parameters_ = AudioParameters(
+ AudioParameters::AUDIO_PCM_LOW_LATENCY, std::tr1::get<2>(GetParam()),
+ std::tr1::get<1>(GetParam()), 16, kLowLatencyBufferSize);
+
+ converter_.reset(new AudioConverter(
+ input_parameters_, output_parameters_, false));
+
+ audio_bus_ = AudioBus::Create(output_parameters_);
+ expected_audio_bus_ = AudioBus::Create(output_parameters_);
+
+ // Allocate one callback for generating expected results.
+ double step = kSineCycles / static_cast<double>(
+ output_parameters_.frames_per_buffer());
+ expected_callback_.reset(new FakeAudioRenderCallback(step));
+ }
+
+ // Creates |count| input callbacks to be used for conversion testing.
+ void InitializeInputs(int count) {
+ // Setup FakeAudioRenderCallback step to compensate for resampling.
+ double scale_factor = input_parameters_.sample_rate() /
+ static_cast<double>(output_parameters_.sample_rate());
+ double step = kSineCycles / (scale_factor *
+ static_cast<double>(output_parameters_.frames_per_buffer()));
+
+ for (int i = 0; i < count; ++i) {
+ fake_callbacks_.push_back(new FakeAudioRenderCallback(step));
+ converter_->AddInput(fake_callbacks_[i]);
+ }
+ }
+
+ // Resets all input callbacks to a pristine state.
+ void Reset() {
+ converter_->Reset();
+ for (size_t i = 0; i < fake_callbacks_.size(); ++i)
+ fake_callbacks_[i]->reset();
+ expected_callback_->reset();
+ }
+
+ // Sets the volume on all input callbacks to |volume|.
+ void SetVolume(float volume) {
+ for (size_t i = 0; i < fake_callbacks_.size(); ++i)
+ fake_callbacks_[i]->set_volume(volume);
+ }
+
+ // Validates audio data between |audio_bus_| and |expected_audio_bus_| from
+ // |index|..|frames| after |scale| is applied to the expected audio data.
+ bool ValidateAudioData(int index, int frames, float scale) {
+ for (int i = 0; i < audio_bus_->channels(); ++i) {
+ for (int j = index; j < frames; ++j) {
+ double error = fabs(audio_bus_->channel(i)[j] -
+ expected_audio_bus_->channel(i)[j] * scale);
+ if (error > epsilon_) {
+ EXPECT_NEAR(expected_audio_bus_->channel(i)[j] * scale,
+ audio_bus_->channel(i)[j], epsilon_)
+ << " i=" << i << ", j=" << j;
+ return false;
+ }
+ }
+ }
+ return true;
+ }
+
+ // Runs a single Convert() stage, fills |expected_audio_bus_| appropriately,
+ // and validates equality with |audio_bus_| after |scale| is applied.
+ bool RenderAndValidateAudioData(float scale) {
+ // Render actual audio data.
+ converter_->Convert(audio_bus_.get());
+
+ // Render expected audio data.
+ expected_callback_->Render(expected_audio_bus_.get(), 0);
+
+ // Zero out unused channels in the expected AudioBus just as AudioConverter
+ // would during channel mixing.
+ for (int i = input_parameters_.channels();
+ i < output_parameters_.channels(); ++i) {
+ memset(expected_audio_bus_->channel(i), 0,
+ audio_bus_->frames() * sizeof(*audio_bus_->channel(i)));
+ }
+
+ return ValidateAudioData(0, audio_bus_->frames(), scale);
+ }
+
+ // Fills |audio_bus_| fully with |value|.
+ void FillAudioData(float value) {
+ for (int i = 0; i < audio_bus_->channels(); ++i) {
+ std::fill(audio_bus_->channel(i),
+ audio_bus_->channel(i) + audio_bus_->frames(), value);
+ }
+ }
+
+  // Verifies converter output with |inputs| transform inputs.
+ void RunTest(int inputs) {
+ InitializeInputs(inputs);
+
+ SetVolume(0);
+ for (int i = 0; i < kConvertCycles; ++i)
+ ASSERT_TRUE(RenderAndValidateAudioData(0));
+
+ Reset();
+
+ // Set a different volume for each input and verify the results.
+ float total_scale = 0;
+ for (size_t i = 0; i < fake_callbacks_.size(); ++i) {
+ float volume = static_cast<float>(i) / fake_callbacks_.size();
+ total_scale += volume;
+ fake_callbacks_[i]->set_volume(volume);
+ }
+ for (int i = 0; i < kConvertCycles; ++i)
+ ASSERT_TRUE(RenderAndValidateAudioData(total_scale));
+
+ Reset();
+
+ // Remove every other input.
+ for (size_t i = 1; i < fake_callbacks_.size(); i += 2)
+ converter_->RemoveInput(fake_callbacks_[i]);
+
+ SetVolume(1);
+ float scale = inputs > 1 ? inputs / 2.0f : inputs;
+ for (int i = 0; i < kConvertCycles; ++i)
+ ASSERT_TRUE(RenderAndValidateAudioData(scale));
+ }
+
+ protected:
+ virtual ~AudioConverterTest() {}
+
+ // Converter under test.
+ scoped_ptr<AudioConverter> converter_;
+
+ // Input and output parameters used for AudioConverter construction.
+ AudioParameters input_parameters_;
+ AudioParameters output_parameters_;
+
+ // Destination AudioBus for AudioConverter output.
+ scoped_ptr<AudioBus> audio_bus_;
+
+ // AudioBus containing expected results for comparison with |audio_bus_|.
+ scoped_ptr<AudioBus> expected_audio_bus_;
+
+ // Vector of all input callbacks used to drive AudioConverter::Convert().
+ ScopedVector<FakeAudioRenderCallback> fake_callbacks_;
+
+ // Parallel input callback which generates the expected output.
+ scoped_ptr<FakeAudioRenderCallback> expected_callback_;
+
+ // Epsilon value with which to perform comparisons between |audio_bus_| and
+ // |expected_audio_bus_|.
+ double epsilon_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioConverterTest);
+};
+
+// Ensure the buffer delay provided by AudioConverter is accurate.
+TEST(AudioConverterTest, AudioDelay) {
+ // Choose input and output parameters such that the transform must make
+ // multiple calls to fill the buffer.
+ AudioParameters input_parameters = AudioParameters(
+ AudioParameters::AUDIO_PCM_LINEAR, kChannelLayout, kSampleRate,
+ kBitsPerChannel, kLowLatencyBufferSize);
+ AudioParameters output_parameters = AudioParameters(
+ AudioParameters::AUDIO_PCM_LINEAR, kChannelLayout, kSampleRate * 2,
+ kBitsPerChannel, kHighLatencyBufferSize);
+
+ AudioConverter converter(input_parameters, output_parameters, false);
+ FakeAudioRenderCallback callback(0.2);
+ scoped_ptr<AudioBus> audio_bus = AudioBus::Create(output_parameters);
+ converter.AddInput(&callback);
+ converter.Convert(audio_bus.get());
+
+ // Calculate the expected buffer delay for given AudioParameters.
+ double input_sample_rate = input_parameters.sample_rate();
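+
+ // |fill_count| is the number of input buffers the converter must request to
+ // produce one output buffer: the output buffer size is rescaled to the
+ // input sample rate and then divided by the input buffer size.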
+ int fill_count =
+ (output_parameters.frames_per_buffer() * input_sample_rate /
+ output_parameters.sample_rate()) / input_parameters.frames_per_buffer();
+
+ base::TimeDelta input_frame_duration = base::TimeDelta::FromMicroseconds(
+ base::Time::kMicrosecondsPerSecond / input_sample_rate);
+
+ int expected_last_delay_milliseconds =
+ fill_count * input_parameters.frames_per_buffer() *
+ input_frame_duration.InMillisecondsF();
+
+ EXPECT_EQ(expected_last_delay_milliseconds,
+ callback.last_audio_delay_milliseconds());
+}
+
+// InputCallback that zeros out the provided AudioBus. Used for benchmarking.
+class NullInputProvider : public AudioConverter::InputCallback {
+ public:
+ NullInputProvider() {}
+ virtual ~NullInputProvider() {}
+
+ virtual double ProvideInput(AudioBus* audio_bus,
+ base::TimeDelta buffer_delay) OVERRIDE {
+ audio_bus->Zero();
+ return 1;
+ }
+};
+
+// Benchmark for audio conversion. Original benchmarks were run with
+// --audio-converter-iterations=50000.
+TEST(AudioConverterTest, ConvertBenchmark) {
+ int benchmark_iterations = kDefaultIterations;
+ std::string iterations(CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
+ kBenchmarkIterations));
+ base::StringToInt(iterations, &benchmark_iterations);
+ if (benchmark_iterations < kDefaultIterations)
+ benchmark_iterations = kDefaultIterations;
+
+ NullInputProvider fake_input1;
+ NullInputProvider fake_input2;
+ NullInputProvider fake_input3;
+
+ printf("Benchmarking %d iterations:\n", benchmark_iterations);
+
+ {
+ // Create input and output parameters to convert between the two most common
+ // sets of parameters (as indicated via UMA data).
+ AudioParameters input_params(
+ AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
+ 48000, 16, 2048);
+ AudioParameters output_params(
+ AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
+ 44100, 16, 440);
+ scoped_ptr<AudioBus> output_bus = AudioBus::Create(output_params);
+
+ scoped_ptr<AudioConverter> converter(
+ new AudioConverter(input_params, output_params, true));
+ converter->AddInput(&fake_input1);
+ converter->AddInput(&fake_input2);
+ converter->AddInput(&fake_input3);
+
+ // Benchmark Convert() w/ resampling.
+ base::TimeTicks start = base::TimeTicks::HighResNow();
+ for (int i = 0; i < benchmark_iterations; ++i) {
+ converter->Convert(output_bus.get());
+ }
+ double total_time_ms =
+ (base::TimeTicks::HighResNow() - start).InMillisecondsF();
+ printf("Convert() w/ Resampling took %.2fms.\n", total_time_ms);
+ }
+
+ // Create input and output parameters to convert between common buffer sizes
+ // without any resampling for the FIFO vs no FIFO benchmarks.
+ AudioParameters input_params(
+ AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
+ 44100, 16, 2048);
+ AudioParameters output_params(
+ AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
+ 44100, 16, 440);
+ scoped_ptr<AudioBus> output_bus = AudioBus::Create(output_params);
+
+ {
+ scoped_ptr<AudioConverter> converter(
+ new AudioConverter(input_params, output_params, false));
+ converter->AddInput(&fake_input1);
+ converter->AddInput(&fake_input2);
+ converter->AddInput(&fake_input3);
+
+ // Benchmark Convert() w/ FIFO.
+ base::TimeTicks start = base::TimeTicks::HighResNow();
+ for (int i = 0; i < benchmark_iterations; ++i) {
+ converter->Convert(output_bus.get());
+ }
+ double total_time_ms =
+ (base::TimeTicks::HighResNow() - start).InMillisecondsF();
+ printf("Convert() w/ FIFO took %.2fms.\n", total_time_ms);
+ }
+
+ {
+ scoped_ptr<AudioConverter> converter(
+ new AudioConverter(input_params, output_params, true));
+ converter->AddInput(&fake_input1);
+ converter->AddInput(&fake_input2);
+ converter->AddInput(&fake_input3);
+
+ // Benchmark Convert() w/o FIFO.
+ base::TimeTicks start = base::TimeTicks::HighResNow();
+ for (int i = 0; i < benchmark_iterations; ++i) {
+ converter->Convert(output_bus.get());
+ }
+ double total_time_ms =
+ (base::TimeTicks::HighResNow() - start).InMillisecondsF();
+ printf("Convert() w/o FIFO took %.2fms.\n", total_time_ms);
+ }
+}
+
+TEST_P(AudioConverterTest, NoInputs) {
+ FillAudioData(1.0f);
+ EXPECT_TRUE(RenderAndValidateAudioData(0.0f));
+}
+
+TEST_P(AudioConverterTest, OneInput) {
+ RunTest(1);
+}
+
+TEST_P(AudioConverterTest, ManyInputs) {
+ RunTest(kConvertInputs);
+}
+
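+// The tuple fields are, in order: input sample rate, output sample rate,
+// output channel layout and comparison epsilon, as reflected by the labeled
+// cases below.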
+INSTANTIATE_TEST_CASE_P(
+ AudioConverterTest, AudioConverterTest, testing::Values(
+ // No resampling. No channel mixing.
+ std::tr1::make_tuple(44100, 44100, CHANNEL_LAYOUT_STEREO, 0.00000048),
+
+ // Upsampling. Channel upmixing.
+ std::tr1::make_tuple(44100, 48000, CHANNEL_LAYOUT_QUAD, 0.033),
+
+ // Downsampling. Channel downmixing.
+ std::tr1::make_tuple(48000, 41000, CHANNEL_LAYOUT_MONO, 0.042)));
+
+} // namespace media
diff --git a/chromium/media/base/audio_decoder.cc b/chromium/media/base/audio_decoder.cc
new file mode 100644
index 00000000000..939066078c4
--- /dev/null
+++ b/chromium/media/base/audio_decoder.cc
@@ -0,0 +1,13 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/audio_decoder.h"
+
+namespace media {
+
+AudioDecoder::AudioDecoder() {}
+
+AudioDecoder::~AudioDecoder() {}
+
+} // namespace media
diff --git a/chromium/media/base/audio_decoder.h b/chromium/media/base/audio_decoder.h
new file mode 100644
index 00000000000..aa2eeb80ea8
--- /dev/null
+++ b/chromium/media/base/audio_decoder.h
@@ -0,0 +1,66 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_AUDIO_DECODER_H_
+#define MEDIA_BASE_AUDIO_DECODER_H_
+
+#include "base/callback.h"
+#include "base/memory/ref_counted.h"
+#include "media/base/channel_layout.h"
+#include "media/base/pipeline_status.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+class AudioBuffer;
+class DemuxerStream;
+
+class MEDIA_EXPORT AudioDecoder {
+ public:
+ // Status codes for read operations.
+ enum Status {
+ kOk,
+ kAborted,
+ kDecodeError,
+ };
+
+ AudioDecoder();
+ virtual ~AudioDecoder();
+
+ // Initialize an AudioDecoder with the given DemuxerStream, executing the
+ // callback upon completion.
+ // statistics_cb is used to update global pipeline statistics.
+ virtual void Initialize(DemuxerStream* stream,
+ const PipelineStatusCB& status_cb,
+ const StatisticsCB& statistics_cb) = 0;
+
+ // Request samples to be decoded and returned via the provided callback.
+ // Only one read may be in flight at any given time.
+ //
+ // Implementations guarantee that the callback will not be called from within
+ // this method.
+ //
+ // Non-NULL sample buffer pointers will contain decoded audio data or may
+ // indicate the end of the stream. A NULL buffer pointer indicates an aborted
+ // Read(). This can happen if the DemuxerStream gets flushed and doesn't have
+ // any more data to return.
+ typedef base::Callback<void(Status, const scoped_refptr<AudioBuffer>&)>
+ ReadCB;
+ virtual void Read(const ReadCB& read_cb) = 0;
+
+ // Reset decoder state, dropping any queued encoded data.
+ virtual void Reset(const base::Closure& closure) = 0;
+
+ // Returns information about the decoded audio format.
+ virtual int bits_per_channel() = 0;
+ virtual ChannelLayout channel_layout() = 0;
+ virtual int samples_per_second() = 0;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(AudioDecoder);
+};
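+
+// Illustrative Read() usage (a sketch only; OnDecodeDone is a hypothetical
+// client function, not part of this interface):
+//
+//   void OnDecodeDone(AudioDecoder::Status status,
+//                     const scoped_refptr<AudioBuffer>& buffer) {
+//     if (status != AudioDecoder::kOk || !buffer.get())
+//       return;  // Decode error, or a NULL buffer from an aborted Read().
+//     // ... consume the decoded |buffer| ...
+//   }
+//   ...
+//   decoder->Read(base::Bind(&OnDecodeDone));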
+
+} // namespace media
+
+#endif // MEDIA_BASE_AUDIO_DECODER_H_
diff --git a/chromium/media/base/audio_decoder_config.cc b/chromium/media/base/audio_decoder_config.cc
new file mode 100644
index 00000000000..38db05d3a54
--- /dev/null
+++ b/chromium/media/base/audio_decoder_config.cc
@@ -0,0 +1,98 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/audio_decoder_config.h"
+
+#include "base/logging.h"
+#include "base/metrics/histogram.h"
+#include "media/audio/sample_rates.h"
+#include "media/base/limits.h"
+#include "media/base/sample_format.h"
+
+namespace media {
+
+AudioDecoderConfig::AudioDecoderConfig()
+ : codec_(kUnknownAudioCodec),
+ sample_format_(kUnknownSampleFormat),
+ bytes_per_channel_(0),
+ channel_layout_(CHANNEL_LAYOUT_UNSUPPORTED),
+ samples_per_second_(0),
+ bytes_per_frame_(0),
+ is_encrypted_(false) {
+}
+
+AudioDecoderConfig::AudioDecoderConfig(AudioCodec codec,
+ SampleFormat sample_format,
+ ChannelLayout channel_layout,
+ int samples_per_second,
+ const uint8* extra_data,
+ size_t extra_data_size,
+ bool is_encrypted) {
+ Initialize(codec, sample_format, channel_layout, samples_per_second,
+ extra_data, extra_data_size, is_encrypted, true);
+}
+
+void AudioDecoderConfig::Initialize(AudioCodec codec,
+ SampleFormat sample_format,
+ ChannelLayout channel_layout,
+ int samples_per_second,
+ const uint8* extra_data,
+ size_t extra_data_size,
+ bool is_encrypted,
+ bool record_stats) {
+ CHECK((extra_data_size != 0) == (extra_data != NULL));
+
+ if (record_stats) {
+ UMA_HISTOGRAM_ENUMERATION("Media.AudioCodec", codec, kAudioCodecMax);
+ UMA_HISTOGRAM_ENUMERATION("Media.AudioSampleFormat", sample_format,
+ kSampleFormatMax);
+ UMA_HISTOGRAM_ENUMERATION("Media.AudioChannelLayout", channel_layout,
+ CHANNEL_LAYOUT_MAX);
+ AudioSampleRate asr = media::AsAudioSampleRate(samples_per_second);
+ if (asr != kUnexpectedAudioSampleRate) {
+ UMA_HISTOGRAM_ENUMERATION("Media.AudioSamplesPerSecond", asr,
+ kUnexpectedAudioSampleRate);
+ } else {
+ UMA_HISTOGRAM_COUNTS(
+ "Media.AudioSamplesPerSecondUnexpected", samples_per_second);
+ }
+ }
+
+ codec_ = codec;
+ channel_layout_ = channel_layout;
+ samples_per_second_ = samples_per_second;
+ sample_format_ = sample_format;
+ bytes_per_channel_ = SampleFormatToBytesPerChannel(sample_format);
+ extra_data_.assign(extra_data, extra_data + extra_data_size);
+ is_encrypted_ = is_encrypted;
+
+ int channels = ChannelLayoutToChannelCount(channel_layout_);
+ bytes_per_frame_ = channels * bytes_per_channel_;
+}
+
+AudioDecoderConfig::~AudioDecoderConfig() {}
+
+bool AudioDecoderConfig::IsValidConfig() const {
+ return codec_ != kUnknownAudioCodec &&
+ channel_layout_ != CHANNEL_LAYOUT_UNSUPPORTED &&
+ bytes_per_channel_ > 0 &&
+ bytes_per_channel_ <= limits::kMaxBytesPerSample &&
+ samples_per_second_ > 0 &&
+ samples_per_second_ <= limits::kMaxSampleRate &&
+ sample_format_ != kUnknownSampleFormat;
+}
+
+bool AudioDecoderConfig::Matches(const AudioDecoderConfig& config) const {
+ return ((codec() == config.codec()) &&
+ (bytes_per_channel() == config.bytes_per_channel()) &&
+ (channel_layout() == config.channel_layout()) &&
+ (samples_per_second() == config.samples_per_second()) &&
+ (extra_data_size() == config.extra_data_size()) &&
+ (!extra_data() || !memcmp(extra_data(), config.extra_data(),
+ extra_data_size())) &&
+ (is_encrypted() == config.is_encrypted()) &&
+ (sample_format() == config.sample_format()));
+}
+
+} // namespace media
diff --git a/chromium/media/base/audio_decoder_config.h b/chromium/media/base/audio_decoder_config.h
new file mode 100644
index 00000000000..1c61e70c3ad
--- /dev/null
+++ b/chromium/media/base/audio_decoder_config.h
@@ -0,0 +1,113 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_AUDIO_DECODER_CONFIG_H_
+#define MEDIA_BASE_AUDIO_DECODER_CONFIG_H_
+
+#include <vector>
+
+#include "base/basictypes.h"
+#include "media/base/channel_layout.h"
+#include "media/base/media_export.h"
+#include "media/base/sample_format.h"
+
+namespace media {
+
+enum AudioCodec {
+ // These values are histogrammed over time; do not change their ordinal
+ // values. When deleting a codec, replace it with a dummy value; when adding a
+ // codec, do so at the bottom before kAudioCodecMax.
+ kUnknownAudioCodec = 0,
+ kCodecAAC,
+ kCodecMP3,
+ kCodecPCM,
+ kCodecVorbis,
+ kCodecFLAC,
+ kCodecAMR_NB,
+ kCodecAMR_WB,
+ kCodecPCM_MULAW,
+ kCodecGSM_MS,
+ kCodecPCM_S16BE,
+ kCodecPCM_S24BE,
+ kCodecOpus,
+ kCodecEAC3,
+ // DO NOT ADD RANDOM AUDIO CODECS!
+ //
+ // The only acceptable time to add a new codec is if there is production code
+ // that uses said codec in the same CL.
+
+ // Must always be last!
+ kAudioCodecMax
+};
+
+// TODO(dalecurtis): FFmpeg API uses |bytes_per_channel| instead of
+// |bits_per_channel|, we should switch over since bits are generally confusing
+// to work with.
+class MEDIA_EXPORT AudioDecoderConfig {
+ public:
+ // Constructs an uninitialized object. Clients should call Initialize() with
+ // appropriate values before using.
+ AudioDecoderConfig();
+
+ // Constructs an initialized object. It is acceptable to pass in NULL for
+ // |extra_data|; otherwise the memory is copied.
+ AudioDecoderConfig(AudioCodec codec, SampleFormat sample_format,
+ ChannelLayout channel_layout, int samples_per_second,
+ const uint8* extra_data, size_t extra_data_size,
+ bool is_encrypted);
+
+ ~AudioDecoderConfig();
+
+ // Resets the internal state of this object.
+ void Initialize(AudioCodec codec, SampleFormat sample_format,
+ ChannelLayout channel_layout, int samples_per_second,
+ const uint8* extra_data, size_t extra_data_size,
+ bool is_encrypted, bool record_stats);
+
+ // Returns true if this object has appropriate configuration values, false
+ // otherwise.
+ bool IsValidConfig() const;
+
+ // Returns true if all fields in |config| match this config.
+ // Note: The contents of |extra_data_| are compared, not the raw pointers.
+ bool Matches(const AudioDecoderConfig& config) const;
+
+ AudioCodec codec() const { return codec_; }
+ int bits_per_channel() const { return bytes_per_channel_ * 8; }
+ int bytes_per_channel() const { return bytes_per_channel_; }
+ ChannelLayout channel_layout() const { return channel_layout_; }
+ int samples_per_second() const { return samples_per_second_; }
+ SampleFormat sample_format() const { return sample_format_; }
+ int bytes_per_frame() const { return bytes_per_frame_; }
+
+ // Optional byte data required to initialize audio decoders such as Vorbis
+ // codebooks.
+ const uint8* extra_data() const {
+ return extra_data_.empty() ? NULL : &extra_data_[0];
+ }
+ size_t extra_data_size() const { return extra_data_.size(); }
+
+ // Whether the audio stream is potentially encrypted.
+ // Note that in a potentially encrypted audio stream, individual buffers
+ // can be encrypted or not encrypted.
+ bool is_encrypted() const { return is_encrypted_; }
+
+ private:
+ AudioCodec codec_;
+ SampleFormat sample_format_;
+ int bytes_per_channel_;
+ ChannelLayout channel_layout_;
+ int samples_per_second_;
+ int bytes_per_frame_;
+ std::vector<uint8> extra_data_;
+ bool is_encrypted_;
+
+ // Intentionally not using DISALLOW_COPY_AND_ASSIGN here to allow the
+ // compiler-generated copy constructor and assignment operator. Since the
+ // extra data is typically small, the performance impact is minimal.
+};
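+
+// Example construction (a sketch; the codec, sample format and the Vorbis
+// codebook variables are illustrative, not prescribed by this header):
+//
+//   AudioDecoderConfig config(kCodecVorbis, kSampleFormatPlanarF32,
+//                             CHANNEL_LAYOUT_STEREO, 44100,
+//                             codebook_data, codebook_size,
+//                             false /* is_encrypted */);
+//   DCHECK(config.IsValidConfig());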
+
+} // namespace media
+
+#endif // MEDIA_BASE_AUDIO_DECODER_CONFIG_H_
diff --git a/chromium/media/base/audio_fifo.cc b/chromium/media/base/audio_fifo.cc
new file mode 100644
index 00000000000..b6e8f806e05
--- /dev/null
+++ b/chromium/media/base/audio_fifo.cc
@@ -0,0 +1,144 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/audio_fifo.h"
+
+#include "base/logging.h"
+
+using base::subtle::Atomic32;
+using base::subtle::NoBarrier_Store;
+
+namespace media {
+
+// Given the current position in the FIFO, the maximum number of elements in
+// the FIFO, and the size of the input, this method provides two output
+// results: |size| and |wrap_size|. These two results can then be used for
+// memcpy operations to and from the FIFO.
+// Under "normal" circumstances, |size| will be equal to |in_size| and
+// |wrap_size| will be zero. This corresponds to the non-wrapping case where
+// we have not yet reached the "edge" of the FIFO. If |pos| + |in_size|
+// exceeds the total size of the FIFO, we must wrap around and start reusing
+// a part of the allocated memory. The size of this part is given by
+// |wrap_size|.
+static void GetSizes(
+ int pos, int max_size, int in_size, int* size, int* wrap_size) {
+ if (pos + in_size > max_size) {
+ // Wrapping is required => derive size of each segment.
+ *size = max_size - pos;
+ *wrap_size = in_size - *size;
+ } else {
+ // Wrapping is not required.
+ *size = in_size;
+ *wrap_size = 0;
+ }
+}
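+
+// For example, with |pos| = 6, |max_size| = 8 and |in_size| = 4, GetSizes()
+// yields |size| = 2 and |wrap_size| = 2: two elements fit before the edge of
+// the ring buffer and the remaining two wrap around to the start.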
+
+// Updates the read/write position by |step| modulo the maximum number of
+// elements in the FIFO to ensure that the position counter wraps around at
+// the endpoint.
+static int UpdatePos(int pos, int step, int max_size) {
+ return ((pos + step) % max_size);
+}
+
+AudioFifo::AudioFifo(int channels, int frames)
+ : audio_bus_(AudioBus::Create(channels, frames)),
+ max_frames_(frames),
+ frames_pushed_(0),
+ frames_consumed_(0),
+ read_pos_(0),
+ write_pos_(0) {}
+
+AudioFifo::~AudioFifo() {}
+
+int AudioFifo::frames() const {
+ int delta = frames_pushed_ - frames_consumed_;
+ base::subtle::MemoryBarrier();
+ return delta;
+}
+
+void AudioFifo::Push(const AudioBus* source) {
+ DCHECK(source);
+ DCHECK_EQ(source->channels(), audio_bus_->channels());
+
+ // Ensure that there is space for the new data in the FIFO.
+ const int source_size = source->frames();
+ CHECK_LE(source_size + frames(), max_frames_);
+
+ // Figure out if wrapping is needed and if so what segment sizes we need
+ // when adding the new audio bus content to the FIFO.
+ int append_size = 0;
+ int wrap_size = 0;
+ GetSizes(write_pos_, max_frames(), source_size, &append_size, &wrap_size);
+
+ // Copy all channels from the source to the FIFO. Wrap around if needed.
+ for (int ch = 0; ch < source->channels(); ++ch) {
+ float* dest = audio_bus_->channel(ch);
+ const float* src = source->channel(ch);
+
+ // Append part of (or the complete) source to the FIFO.
+ memcpy(&dest[write_pos_], &src[0], append_size * sizeof(src[0]));
+ if (wrap_size > 0) {
+ // Wrapping is needed: copy remaining part from the source to the FIFO.
+ memcpy(&dest[0], &src[append_size], wrap_size * sizeof(src[0]));
+ }
+ }
+
+ // Ensure the data is *really* written before updating |frames_pushed_|.
+ base::subtle::MemoryBarrier();
+
+ Atomic32 new_frames_pushed = frames_pushed_ + source_size;
+ NoBarrier_Store(&frames_pushed_, new_frames_pushed);
+
+ DCHECK_LE(frames(), max_frames());
+ write_pos_ = UpdatePos(write_pos_, source_size, max_frames());
+}
+
+void AudioFifo::Consume(AudioBus* destination,
+ int start_frame,
+ int frames_to_consume) {
+ DCHECK(destination);
+ DCHECK_EQ(destination->channels(), audio_bus_->channels());
+
+ // It is not possible to ask for more data than what is available in the FIFO.
+ CHECK_LE(frames_to_consume, frames());
+
+ // A copy from the FIFO to |destination| will only be performed if the
+ // allocated memory in |destination| is sufficient.
+ CHECK_LE(frames_to_consume + start_frame, destination->frames());
+
+ // Figure out if wrapping is needed and if so what segment sizes we need
+ // when removing audio bus content from the FIFO.
+ int consume_size = 0;
+ int wrap_size = 0;
+ GetSizes(read_pos_, max_frames(), frames_to_consume,
+ &consume_size, &wrap_size);
+
+ // For all channels, remove the requested amount of data from the FIFO
+ // and copy the content to the destination. Wrap around if needed.
+ for (int ch = 0; ch < destination->channels(); ++ch) {
+ float* dest = destination->channel(ch);
+ const float* src = audio_bus_->channel(ch);
+
+ // Copy a selected part of the FIFO to the destination.
+ memcpy(&dest[start_frame], &src[read_pos_], consume_size * sizeof(src[0]));
+ if (wrap_size > 0) {
+ // Wrapping is needed: copy remaining part to the destination.
+ memcpy(&dest[consume_size + start_frame], &src[0],
+ wrap_size * sizeof(src[0]));
+ }
+ }
+
+ Atomic32 new_frames_consumed = frames_consumed_ + frames_to_consume;
+ NoBarrier_Store(&frames_consumed_, new_frames_consumed);
+
+ read_pos_ = UpdatePos(read_pos_, frames_to_consume, max_frames());
+}
+
+void AudioFifo::Clear() {
+ frames_pushed_ = 0;
+ frames_consumed_ = 0;
+ read_pos_ = 0;
+ write_pos_ = 0;
+}
+
+} // namespace media
diff --git a/chromium/media/base/audio_fifo.h b/chromium/media/base/audio_fifo.h
new file mode 100644
index 00000000000..e978ace05ba
--- /dev/null
+++ b/chromium/media/base/audio_fifo.h
@@ -0,0 +1,68 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_AUDIO_FIFO_H_
+#define MEDIA_BASE_AUDIO_FIFO_H_
+
+#include "base/atomicops.h"
+#include "media/base/audio_bus.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+// First-in first-out container for AudioBus elements.
+// The maximum number of audio frames in the FIFO is set at construction and
+// cannot be extended dynamically. The allocated memory is used as a
+// ring buffer.
+// This class is thread-safe in the limited sense that one thread may call
+// Push(), while a second thread calls Consume().
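+//
+// Example usage (a sketch):
+//
+//   AudioFifo fifo(2, 1024);                  // 2 channels, 1024 frames max.
+//   scoped_ptr<AudioBus> in = AudioBus::Create(2, 256);
+//   fifo.Push(in.get());                      // fifo.frames() == 256.
+//   scoped_ptr<AudioBus> out = AudioBus::Create(2, 128);
+//   fifo.Consume(out.get(), 0, 128);          // fifo.frames() == 128.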
+class MEDIA_EXPORT AudioFifo {
+ public:
+ // Creates a new AudioFifo and allocates |channels| of length |frames|.
+ AudioFifo(int channels, int frames);
+ virtual ~AudioFifo();
+
+ // Pushes all audio channel data from |source| to the FIFO.
+ // Push() will crash if the allocated space is insufficient.
+ void Push(const AudioBus* source);
+
+ // Consumes |frames_to_consume| audio frames from the FIFO and copies
+ // them to |destination| starting at position |start_frame|.
+ // Consume() will crash if the FIFO does not contain |frames_to_consume|
+ // frames or if there is insufficient space in |destination| to store the
+ // frames.
+ void Consume(AudioBus* destination, int start_frame, int frames_to_consume);
+
+ // Empties the FIFO without deallocating any memory.
+ void Clear();
+
+ // Number of actual audio frames in the FIFO.
+ int frames() const;
+
+ int max_frames() const { return max_frames_; }
+
+ private:
+ // The actual FIFO is an audio bus implemented as a ring buffer.
+ scoped_ptr<AudioBus> audio_bus_;
+
+ // Maximum number of elements the FIFO can contain.
+ // This value is set by |frames| in the constructor.
+ const int max_frames_;
+
+ // Number of actual elements in the FIFO.
+ volatile base::subtle::Atomic32 frames_pushed_;
+ volatile base::subtle::Atomic32 frames_consumed_;
+
+ // Current read position.
+ int read_pos_;
+
+ // Current write position.
+ int write_pos_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioFifo);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_AUDIO_FIFO_H_
diff --git a/chromium/media/base/audio_fifo_unittest.cc b/chromium/media/base/audio_fifo_unittest.cc
new file mode 100644
index 00000000000..70ad9698453
--- /dev/null
+++ b/chromium/media/base/audio_fifo_unittest.cc
@@ -0,0 +1,194 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// TODO(henrika): add a test which includes |start_frame| in the Consume() call.
+
+#include "media/base/audio_fifo.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+class AudioFifoTest : public testing::Test {
+ public:
+ AudioFifoTest() {}
+ virtual ~AudioFifoTest() {}
+
+ void VerifyValue(const float data[], int size, float value) {
+ for (int i = 0; i < size; ++i)
+ ASSERT_FLOAT_EQ(value, data[i]) << "i=" << i;
+ }
+
+ protected:
+ DISALLOW_COPY_AND_ASSIGN(AudioFifoTest);
+};
+
+// Verify that construction works as intended.
+TEST_F(AudioFifoTest, Construct) {
+ static const int kChannels = 6;
+ static const int kMaxFrameCount = 128;
+ AudioFifo fifo(kChannels, kMaxFrameCount);
+ EXPECT_EQ(fifo.frames(), 0);
+}
+
+// Pushes audio bus objects to a FIFO and fills it up to different degrees.
+TEST_F(AudioFifoTest, Push) {
+ static const int kChannels = 2;
+ static const int kMaxFrameCount = 128;
+ AudioFifo fifo(kChannels, kMaxFrameCount);
+ {
+ SCOPED_TRACE("Push 50%");
+ scoped_ptr<AudioBus> bus = AudioBus::Create(kChannels, kMaxFrameCount / 2);
+ EXPECT_EQ(fifo.frames(), 0);
+ fifo.Push(bus.get());
+ EXPECT_EQ(fifo.frames(), bus->frames());
+ fifo.Clear();
+ }
+ {
+ SCOPED_TRACE("Push 100%");
+ scoped_ptr<AudioBus> bus = AudioBus::Create(kChannels, kMaxFrameCount);
+ EXPECT_EQ(fifo.frames(), 0);
+ fifo.Push(bus.get());
+ EXPECT_EQ(fifo.frames(), bus->frames());
+ fifo.Clear();
+ }
+}
+
+// Consumes audio bus objects from a FIFO and empties it to different degrees.
+TEST_F(AudioFifoTest, Consume) {
+ static const int kChannels = 2;
+ static const int kMaxFrameCount = 128;
+ AudioFifo fifo(kChannels, kMaxFrameCount);
+ {
+ scoped_ptr<AudioBus> bus = AudioBus::Create(kChannels, kMaxFrameCount);
+ fifo.Push(bus.get());
+ EXPECT_EQ(fifo.frames(), kMaxFrameCount);
+ }
+ {
+ SCOPED_TRACE("Consume 50%");
+ scoped_ptr<AudioBus> bus = AudioBus::Create(kChannels, kMaxFrameCount / 2);
+ fifo.Consume(bus.get(), 0, bus->frames());
+ EXPECT_EQ(fifo.frames(), bus->frames());
+ fifo.Push(bus.get());
+ EXPECT_EQ(fifo.frames(), kMaxFrameCount);
+ }
+ {
+ SCOPED_TRACE("Consume 100%");
+ scoped_ptr<AudioBus> bus = AudioBus::Create(kChannels, kMaxFrameCount);
+ fifo.Consume(bus.get(), 0, bus->frames());
+ EXPECT_EQ(fifo.frames(), 0);
+ fifo.Push(bus.get());
+ EXPECT_EQ(fifo.frames(), kMaxFrameCount);
+ }
+}
+
+// Verify that the frames() method of the FIFO works as intended while
+// appending and removing audio bus elements to/from the FIFO.
+TEST_F(AudioFifoTest, FramesInFifo) {
+ static const int kChannels = 2;
+ static const int kMaxFrameCount = 64;
+ AudioFifo fifo(kChannels, kMaxFrameCount);
+
+ // Fill up the FIFO and verify that the size grows as it should while adding
+ // one audio frame each time.
+ scoped_ptr<AudioBus> bus = AudioBus::Create(kChannels, 1);
+ int n = 0;
+ while (fifo.frames() < kMaxFrameCount) {
+ fifo.Push(bus.get());
+ EXPECT_EQ(fifo.frames(), ++n);
+ }
+ EXPECT_EQ(fifo.frames(), kMaxFrameCount);
+
+ // Empty the FIFO and verify that the size decreases as it should.
+ // Reduce the size of the FIFO by one frame each time.
+ while (fifo.frames() > 0) {
+ fifo.Consume(bus.get(), 0, bus->frames());
+ EXPECT_EQ(fifo.frames(), --n);
+ }
+ EXPECT_EQ(fifo.frames(), 0);
+
+ // Verify that a steady-state size of #frames in the FIFO is maintained
+ // during a sequence of Push/Consume calls which involves wrapping. We ensure
+ // wrapping by selecting a buffer size which does not evenly divide the
+ // FIFO size.
+ scoped_ptr<AudioBus> bus2 =
+ AudioBus::Create(kChannels, (kMaxFrameCount / 4) - 1);
+ const int frames_in_fifo = bus2->frames();
+ fifo.Push(bus2.get());
+ EXPECT_EQ(fifo.frames(), frames_in_fifo);
+ for (int n = 0; n < kMaxFrameCount; ++n) {
+ fifo.Push(bus2.get());
+ fifo.Consume(bus2.get(), 0, frames_in_fifo);
+ EXPECT_EQ(fifo.frames(), frames_in_fifo);
+ }
+}
+
+// Perform a sequence of Push/Consume calls and verify that the data written
+// to the FIFO is correctly retrieved, i.e., that the order is correct and the
+// values are correct.
+TEST_F(AudioFifoTest, VerifyDataValues) {
+ static const int kChannels = 2;
+ static const int kFrameCount = 2;
+ static const int kFifoFrameCount = 5 * kFrameCount;
+
+ AudioFifo fifo(kChannels, kFifoFrameCount);
+ scoped_ptr<AudioBus> bus = AudioBus::Create(kChannels, kFrameCount);
+ EXPECT_EQ(fifo.frames(), 0);
+ EXPECT_EQ(bus->frames(), kFrameCount);
+
+ // Start by filling up the FIFO with audio frames. The first audio frame
+ // will contain all 1's, the second all 2's etc. All channels contain the
+ // same value.
+ int value = 1;
+ while (fifo.frames() < kFifoFrameCount) {
+ for (int j = 0; j < bus->channels(); ++j)
+ std::fill(bus->channel(j), bus->channel(j) + bus->frames(), value);
+ fifo.Push(bus.get());
+ EXPECT_EQ(fifo.frames(), bus->frames() * value);
+ ++value;
+ }
+
+ // FIFO should be full now.
+ EXPECT_EQ(fifo.frames(), kFifoFrameCount);
+
+ // Consume all audio frames in the FIFO and verify that the stored values
+ // are correct. In this example, we shall read out: 1, 2, 3, 4, 5 in that
+ // order. Note that we set |frames_to_consume| to half the size of the bus.
+ // It means that we shall read out the same value two times in a row.
+ value = 1;
+ int n = 1;
+ const int frames_to_consume = bus->frames() / 2;
+ while (fifo.frames() > 0) {
+ fifo.Consume(bus.get(), 0, frames_to_consume);
+ for (int j = 0; j < bus->channels(); ++j)
+ VerifyValue(bus->channel(j), frames_to_consume, value);
+ if (n++ % 2 == 0)
+ ++value; // counts 1, 1, 2, 2, 3, 3,...
+ }
+
+ // FIFO should be empty now.
+ EXPECT_EQ(fifo.frames(), 0);
+
+ // Push one audio bus to the FIFO and fill it with 1's.
+ value = 1;
+ for (int j = 0; j < bus->channels(); ++j)
+ std::fill(bus->channel(j), bus->channel(j) + bus->frames(), value);
+ fifo.Push(bus.get());
+ EXPECT_EQ(fifo.frames(), bus->frames());
+
+ // Keep calling Consume/Push a few rounds and verify that we read out the
+ // correct values. The number of elements shall be fixed (kFrameCount) during
+ // this phase.
+ for (int i = 0; i < 5 * kFifoFrameCount; i++) {
+ fifo.Consume(bus.get(), 0, bus->frames());
+ for (int j = 0; j < bus->channels(); ++j) {
+ VerifyValue(bus->channel(j), bus->frames(), value);
+ std::fill(bus->channel(j), bus->channel(j) + bus->frames(), value + 1);
+ }
+ fifo.Push(bus.get());
+ EXPECT_EQ(fifo.frames(), bus->frames());
+ ++value;
+ }
+}
+
+} // namespace media
diff --git a/chromium/media/base/audio_hardware_config.cc b/chromium/media/base/audio_hardware_config.cc
new file mode 100644
index 00000000000..d72fce7b4e2
--- /dev/null
+++ b/chromium/media/base/audio_hardware_config.cc
@@ -0,0 +1,80 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/audio_hardware_config.h"
+
+using base::AutoLock;
+using media::AudioParameters;
+
+namespace media {
+
+AudioHardwareConfig::AudioHardwareConfig(
+ const AudioParameters& input_params,
+ const AudioParameters& output_params)
+ : input_params_(input_params),
+ output_params_(output_params) {
+}
+
+AudioHardwareConfig::~AudioHardwareConfig() {}
+
+int AudioHardwareConfig::GetOutputBufferSize() const {
+ AutoLock auto_lock(config_lock_);
+ return output_params_.frames_per_buffer();
+}
+
+int AudioHardwareConfig::GetOutputSampleRate() const {
+ AutoLock auto_lock(config_lock_);
+ return output_params_.sample_rate();
+}
+
+ChannelLayout AudioHardwareConfig::GetOutputChannelLayout() const {
+ AutoLock auto_lock(config_lock_);
+ return output_params_.channel_layout();
+}
+
+int AudioHardwareConfig::GetOutputChannels() const {
+ AutoLock auto_lock(config_lock_);
+ return output_params_.channels();
+}
+
+int AudioHardwareConfig::GetInputSampleRate() const {
+ AutoLock auto_lock(config_lock_);
+ return input_params_.sample_rate();
+}
+
+ChannelLayout AudioHardwareConfig::GetInputChannelLayout() const {
+ AutoLock auto_lock(config_lock_);
+ return input_params_.channel_layout();
+}
+
+int AudioHardwareConfig::GetInputChannels() const {
+ AutoLock auto_lock(config_lock_);
+ return input_params_.channels();
+}
+
+media::AudioParameters
+AudioHardwareConfig::GetInputConfig() const {
+ AutoLock auto_lock(config_lock_);
+ return input_params_;
+}
+
+media::AudioParameters
+AudioHardwareConfig::GetOutputConfig() const {
+ AutoLock auto_lock(config_lock_);
+ return output_params_;
+}
+
+void AudioHardwareConfig::UpdateInputConfig(
+ const AudioParameters& input_params) {
+ AutoLock auto_lock(config_lock_);
+ input_params_ = input_params;
+}
+
+void AudioHardwareConfig::UpdateOutputConfig(
+ const AudioParameters& output_params) {
+ AutoLock auto_lock(config_lock_);
+ output_params_ = output_params;
+}
+
+} // namespace media
diff --git a/chromium/media/base/audio_hardware_config.h b/chromium/media/base/audio_hardware_config.h
new file mode 100644
index 00000000000..d1621b98224
--- /dev/null
+++ b/chromium/media/base/audio_hardware_config.h
@@ -0,0 +1,55 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_AUDIO_HARDWARE_CONFIG_H_
+#define MEDIA_BASE_AUDIO_HARDWARE_CONFIG_H_
+
+#include "base/compiler_specific.h"
+#include "base/synchronization/lock.h"
+#include "media/audio/audio_parameters.h"
+#include "media/base/channel_layout.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+// Provides thread-safe access to the audio hardware configuration.
+class MEDIA_EXPORT AudioHardwareConfig {
+ public:
+ AudioHardwareConfig(const media::AudioParameters& input_params,
+ const media::AudioParameters& output_params);
+ virtual ~AudioHardwareConfig();
+
+ // Accessors for the currently cached hardware configuration. Safe to call
+ // from any thread.
+ int GetOutputBufferSize() const;
+ int GetOutputSampleRate() const;
+ ChannelLayout GetOutputChannelLayout() const;
+ int GetOutputChannels() const;
+
+ int GetInputSampleRate() const;
+ ChannelLayout GetInputChannelLayout() const;
+ int GetInputChannels() const;
+
+ media::AudioParameters GetInputConfig() const;
+ media::AudioParameters GetOutputConfig() const;
+
+ // Allows callers to update the cached values for either input or output. The
+ // values are paired under the assumption that these values will only be set
+ // after an input or output device change respectively. Safe to call from
+ // any thread.
+ void UpdateInputConfig(const media::AudioParameters& input_params);
+ void UpdateOutputConfig(const media::AudioParameters& output_params);
+
+ private:
+ // Cached values; access is protected by |config_lock_|.
+ mutable base::Lock config_lock_;
+ media::AudioParameters input_params_;
+ media::AudioParameters output_params_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioHardwareConfig);
+};
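+
+// Illustrative usage (a sketch; the AudioParameters objects are assumed to
+// come from the audio backend):
+//
+//   AudioHardwareConfig config(input_params, output_params);
+//   int rate = config.GetOutputSampleRate();       // Safe from any thread.
+//   config.UpdateOutputConfig(new_output_params);  // After a device change.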
+
+} // namespace media
+
+#endif // MEDIA_BASE_AUDIO_HARDWARE_CONFIG_H_
diff --git a/chromium/media/base/audio_hardware_config_unittest.cc b/chromium/media/base/audio_hardware_config_unittest.cc
new file mode 100644
index 00000000000..4a742bf51c8
--- /dev/null
+++ b/chromium/media/base/audio_hardware_config_unittest.cc
@@ -0,0 +1,90 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/audio_hardware_config.h"
+#include "media/audio/audio_parameters.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+static const int kOutputBufferSize = 2048;
+static const int kOutputSampleRate = 48000;
+static const ChannelLayout kOutputChannelLayout = CHANNEL_LAYOUT_STEREO;
+static const int kInputSampleRate = 44100;
+static const ChannelLayout kInputChannelLayout = CHANNEL_LAYOUT_STEREO;
+
+TEST(AudioHardwareConfig, Getters) {
+ AudioParameters input_params(
+ AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ kInputChannelLayout,
+ kInputSampleRate,
+ 16,
+ kOutputBufferSize);
+
+ AudioParameters output_params(
+ AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ kOutputChannelLayout,
+ kOutputSampleRate,
+ 16,
+ kOutputBufferSize);
+
+ AudioHardwareConfig fake_config(input_params, output_params);
+
+ EXPECT_EQ(kOutputBufferSize, fake_config.GetOutputBufferSize());
+ EXPECT_EQ(kOutputSampleRate, fake_config.GetOutputSampleRate());
+ EXPECT_EQ(kInputSampleRate, fake_config.GetInputSampleRate());
+ EXPECT_EQ(kInputChannelLayout, fake_config.GetInputChannelLayout());
+}
+
+TEST(AudioHardwareConfig, Setters) {
+ AudioParameters input_params(
+ AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ kInputChannelLayout,
+ kInputSampleRate,
+ 16,
+ kOutputBufferSize);
+
+ AudioParameters output_params(
+ AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ kOutputChannelLayout,
+ kOutputSampleRate,
+ 16,
+ kOutputBufferSize);
+
+ AudioHardwareConfig fake_config(input_params, output_params);
+
+ // Verify output parameters.
+ const int kNewOutputBufferSize = kOutputBufferSize * 2;
+ const int kNewOutputSampleRate = kOutputSampleRate * 2;
+ EXPECT_NE(kNewOutputBufferSize, fake_config.GetOutputBufferSize());
+ EXPECT_NE(kNewOutputSampleRate, fake_config.GetOutputSampleRate());
+
+ AudioParameters new_output_params(
+ AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ kOutputChannelLayout,
+ kNewOutputSampleRate,
+ 16,
+ kNewOutputBufferSize);
+ fake_config.UpdateOutputConfig(new_output_params);
+ EXPECT_EQ(kNewOutputBufferSize, fake_config.GetOutputBufferSize());
+ EXPECT_EQ(kNewOutputSampleRate, fake_config.GetOutputSampleRate());
+
+ // Verify input parameters.
+ const int kNewInputSampleRate = kInputSampleRate * 2;
+ const ChannelLayout kNewInputChannelLayout = CHANNEL_LAYOUT_MONO;
+ EXPECT_NE(kNewInputSampleRate, fake_config.GetInputSampleRate());
+ EXPECT_NE(kNewInputChannelLayout, fake_config.GetInputChannelLayout());
+
+ AudioParameters new_input_params(
+ AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ kNewInputChannelLayout,
+ kNewInputSampleRate,
+ 16,
+ kOutputBufferSize);
+ fake_config.UpdateInputConfig(new_input_params);
+ EXPECT_EQ(kNewInputSampleRate, fake_config.GetInputSampleRate());
+ EXPECT_EQ(kNewInputChannelLayout, fake_config.GetInputChannelLayout());
+}
+
+} // namespace media
diff --git a/chromium/media/base/audio_hash.cc b/chromium/media/base/audio_hash.cc
new file mode 100644
index 00000000000..0ed6fe51e6f
--- /dev/null
+++ b/chromium/media/base/audio_hash.cc
@@ -0,0 +1,53 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// MSVC++ requires this to be set before any other includes to get M_PI.
+#define _USE_MATH_DEFINES
+#include <cmath>
+
+#include "media/base/audio_hash.h"
+
+#include "base/strings/stringprintf.h"
+#include "media/base/audio_bus.h"
+
+namespace media {
+
+AudioHash::AudioHash()
+ : audio_hash_(),
+ sample_count_(0) {
+ COMPILE_ASSERT(arraysize(audio_hash_) == kHashBuckets, audio_hash_size_error);
+}
+
+AudioHash::~AudioHash() {}
+
+void AudioHash::Update(const AudioBus* audio_bus, int frames) {
+ // Use uint32 to ensure overflow is a defined operation.
+ for (uint32 ch = 0; ch < static_cast<uint32>(audio_bus->channels()); ++ch) {
+ const float* channel = audio_bus->channel(ch);
+ for (uint32 i = 0; i < static_cast<uint32>(frames); ++i) {
+ const uint32 kSampleIndex = sample_count_ + i;
+ const uint32 kHashIndex = (kSampleIndex * (ch + 1)) % kHashBuckets;
+
+ // Mix in a sine wave with the result so we ensure that sequences of empty
+ // buffers don't result in an empty hash.
+ if (ch == 0) {
+ audio_hash_[kHashIndex] +=
+ channel[i] + sin(2.0 * M_PI * M_PI * kSampleIndex);
+ } else {
+ audio_hash_[kHashIndex] += channel[i];
+ }
+ }
+ }
+
+ sample_count_ += static_cast<uint32>(frames);
+}
+
+std::string AudioHash::ToString() const {
+ std::string result;
+ for (size_t i = 0; i < arraysize(audio_hash_); ++i)
+ result += base::StringPrintf("%.2f,", audio_hash_[i]);
+ return result;
+}
+
+} // namespace media \ No newline at end of file
diff --git a/chromium/media/base/audio_hash.h b/chromium/media/base/audio_hash.h
new file mode 100644
index 00000000000..3dc0e9edb3b
--- /dev/null
+++ b/chromium/media/base/audio_hash.h
@@ -0,0 +1,59 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_AUDIO_HASH_H_
+#define MEDIA_BASE_AUDIO_HASH_H_
+
+#include <string>
+
+#include "base/basictypes.h"
+#include "base/strings/string_piece.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+class AudioBus;
+
+// Computes a running hash for a series of AudioBus objects. The hash is the
+// sum of each sample bucketed based on the frame index, channel number, and
+// current hash count. The hash was designed with two properties in mind:
+//
+// 1. Uniform error distribution across the input sample.
+// 2. Resilience to error below a certain threshold.
+//
+// The first is achieved by using a simple summing approach and moving position
+// weighting into the bucket choice. The second is handled during conversion to
+// string by rounding out values to only two decimal places.
+//
+// Using only two decimal places allows for roughly -40 dBFS of error. For
+// reference, SincResampler produces an RMS error of around -15 dBFS. See
+// http://en.wikipedia.org/wiki/DBFS and http://crbug.com/168204 for more info.
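+//
+// Sketch of the bucketing performed by Update() (mirroring the
+// implementation):
+//
+//   bucket = (sample_index * (channel + 1)) % kHashBuckets;
+//   audio_hash_[bucket] += sample;  // Channel 0 also mixes in a sine term.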
+class MEDIA_EXPORT AudioHash {
+ public:
+ AudioHash();
+ ~AudioHash();
+
+ // Update current hash with the contents of the provided AudioBus.
+ void Update(const AudioBus* audio_bus, int frames);
+
+ // Return a string representation of the current hash.
+ std::string ToString() const;
+
+ private:
+ // Storage for the audio hash. The number of buckets controls the importance
+ // of position in the hash. A higher number reduces the chance of false
+ // positives related to incorrect sample position. Value chosen by dice roll.
+ enum { kHashBuckets = 6 };
+ float audio_hash_[kHashBuckets];
+
+ // The total number of samples processed per channel. Uses a uint32 instead
+ // of size_t so overflows on 64-bit and 32-bit machines are equivalent.
+ uint32 sample_count_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioHash);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_AUDIO_HASH_H_ \ No newline at end of file
diff --git a/chromium/media/base/audio_hash_unittest.cc b/chromium/media/base/audio_hash_unittest.cc
new file mode 100644
index 00000000000..ee1b1de420e
--- /dev/null
+++ b/chromium/media/base/audio_hash_unittest.cc
@@ -0,0 +1,167 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/logging.h"
+#include "media/base/audio_bus.h"
+#include "media/base/audio_hash.h"
+#include "media/base/fake_audio_render_callback.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+static const int kChannelCount = 2;
+static const int kFrameCount = 1024;
+
+class AudioHashTest : public testing::Test {
+ public:
+ AudioHashTest()
+ : bus_one_(AudioBus::Create(kChannelCount, kFrameCount)),
+ bus_two_(AudioBus::Create(kChannelCount, kFrameCount)),
+ fake_callback_(0.01) {
+
+ // Fill each channel in each bus with unique data.
+ GenerateUniqueChannels(bus_one_.get());
+ GenerateUniqueChannels(bus_two_.get());
+ }
+
+ void GenerateUniqueChannels(AudioBus* audio_bus) {
+ // Use an AudioBus wrapper to avoid an extra memcpy when filling channels.
+ scoped_ptr<AudioBus> wrapped_bus = AudioBus::CreateWrapper(1);
+ wrapped_bus->set_frames(audio_bus->frames());
+
+ // Since FakeAudioRenderCallback generates only a single channel of unique
+ // audio data, we need to fill each channel manually.
+ for (int ch = 0; ch < audio_bus->channels(); ++ch) {
+ wrapped_bus->SetChannelData(0, audio_bus->channel(ch));
+ fake_callback_.Render(wrapped_bus.get(), 0);
+ }
+ }
+
+ virtual ~AudioHashTest() {}
+
+ protected:
+ scoped_ptr<AudioBus> bus_one_;
+ scoped_ptr<AudioBus> bus_two_;
+ FakeAudioRenderCallback fake_callback_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioHashTest);
+};
+
+// Ensure the same data hashes the same.
+TEST_F(AudioHashTest, Equivalence) {
+ AudioHash hash_one;
+ hash_one.Update(bus_one_.get(), bus_one_->frames());
+
+ AudioHash hash_two;
+ hash_two.Update(bus_one_.get(), bus_one_->frames());
+
+ EXPECT_EQ(hash_one.ToString(), hash_two.ToString());
+}
+
+// Ensure sample order matters to the hash.
+TEST_F(AudioHashTest, SampleOrder) {
+ AudioHash original_hash;
+ original_hash.Update(bus_one_.get(), bus_one_->frames());
+
+ // Swap a sample in the bus.
+ std::swap(bus_one_->channel(0)[0], bus_one_->channel(0)[1]);
+
+ AudioHash swapped_hash;
+ swapped_hash.Update(bus_one_.get(), bus_one_->frames());
+
+ EXPECT_NE(original_hash.ToString(), swapped_hash.ToString());
+}
+
+// Ensure channel order matters to the hash.
+TEST_F(AudioHashTest, ChannelOrder) {
+ AudioHash original_hash;
+ original_hash.Update(bus_one_.get(), bus_one_->frames());
+
+ // Reverse channel order for the same sample data.
+ const int channels = bus_one_->channels();
+ scoped_ptr<AudioBus> swapped_ch_bus = AudioBus::CreateWrapper(channels);
+ swapped_ch_bus->set_frames(bus_one_->frames());
+ for (int i = channels - 1; i >= 0; --i)
+ swapped_ch_bus->SetChannelData(channels - (i + 1), bus_one_->channel(i));
+
+ AudioHash swapped_hash;
+ swapped_hash.Update(swapped_ch_bus.get(), swapped_ch_bus->frames());
+
+ EXPECT_NE(original_hash.ToString(), swapped_hash.ToString());
+}
+
+// Ensure bus order matters to the hash.
+TEST_F(AudioHashTest, BusOrder) {
+ AudioHash original_hash;
+ original_hash.Update(bus_one_.get(), bus_one_->frames());
+ original_hash.Update(bus_two_.get(), bus_two_->frames());
+
+ AudioHash reordered_hash;
+ reordered_hash.Update(bus_two_.get(), bus_two_->frames());
+ reordered_hash.Update(bus_one_.get(), bus_one_->frames());
+
+ EXPECT_NE(original_hash.ToString(), reordered_hash.ToString());
+}
+
+// Ensure bus order matters to the hash even with empty buses.
+TEST_F(AudioHashTest, EmptyBusOrder) {
+ bus_one_->Zero();
+ bus_two_->Zero();
+
+ AudioHash one_bus_hash;
+ one_bus_hash.Update(bus_one_.get(), bus_one_->frames());
+
+ AudioHash two_bus_hash;
+ two_bus_hash.Update(bus_one_.get(), bus_one_->frames());
+ two_bus_hash.Update(bus_two_.get(), bus_two_->frames());
+
+ EXPECT_NE(one_bus_hash.ToString(), two_bus_hash.ToString());
+}
+
+// Where A = [0, n], ensure that hashing A[0:n/2] followed by A[n/2:n]
+// results in the same hash value as hashing all of A at once.
+TEST_F(AudioHashTest, HashIgnoresUpdateOrder) {
+ AudioHash full_hash;
+ full_hash.Update(bus_one_.get(), bus_one_->frames());
+
+ AudioHash half_hash;
+ half_hash.Update(bus_one_.get(), bus_one_->frames() / 2);
+
+ // Create a new bus representing the second half of |bus_one_|.
+ const int half_frames = bus_one_->frames() / 2;
+ const int channels = bus_one_->channels();
+ scoped_ptr<AudioBus> half_bus = AudioBus::CreateWrapper(channels);
+ half_bus->set_frames(half_frames);
+ for (int i = 0; i < channels; ++i)
+ half_bus->SetChannelData(i, bus_one_->channel(i) + half_frames);
+
+ half_hash.Update(half_bus.get(), half_bus->frames());
+ EXPECT_EQ(full_hash.ToString(), half_hash.ToString());
+}
+
+// Ensure approximate hashes pass verification.
+TEST_F(AudioHashTest, VerifySimilarHash) {
+ AudioHash hash_one;
+ hash_one.Update(bus_one_.get(), bus_one_->frames());
+
+ // Twiddle the values inside the first bus.
+ float* channel = bus_one_->channel(0);
+ for (int i = 0; i < bus_one_->frames(); i += bus_one_->frames() / 64)
+ channel[i] += 0.0001f;
+
+ AudioHash hash_two;
+ hash_two.Update(bus_one_.get(), bus_one_->frames());
+
+ EXPECT_EQ(hash_one.ToString(), hash_two.ToString());
+
+ // Twiddle the values too much...
+ for (int i = 0; i < bus_one_->frames(); ++i)
+ channel[i] += 0.0001f;
+
+ AudioHash hash_three;
+ hash_three.Update(bus_one_.get(), bus_one_->frames());
+ EXPECT_NE(hash_one.ToString(), hash_three.ToString());
+}
+
+} // namespace media \ No newline at end of file
diff --git a/chromium/media/base/audio_pull_fifo.cc b/chromium/media/base/audio_pull_fifo.cc
new file mode 100644
index 00000000000..cf25142d904
--- /dev/null
+++ b/chromium/media/base/audio_pull_fifo.cc
@@ -0,0 +1,66 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/audio_pull_fifo.h"
+
+#include <algorithm>
+
+#include "base/logging.h"
+#include "media/base/audio_bus.h"
+
+namespace media {
+
+AudioPullFifo::AudioPullFifo(int channels, int frames, const ReadCB& read_cb)
+ : read_cb_(read_cb),
+ fifo_(AudioBus::Create(channels, frames)),
+ fifo_index_(frames) {}
+
+AudioPullFifo::~AudioPullFifo() {}
+
+void AudioPullFifo::Consume(AudioBus* destination, int frames_to_consume) {
+ DCHECK_LE(frames_to_consume, destination->frames());
+
+ int remaining_frames_to_provide = frames_to_consume;
+
+ // Try to fulfill the request using what's available in the FIFO.
+ int frames_read = ReadFromFifo(destination, remaining_frames_to_provide, 0);
+ int write_pos = frames_read;
+ remaining_frames_to_provide -= frames_read;
+
+ // Get the remaining audio frames from the producer using the callback.
+ while (remaining_frames_to_provide > 0) {
+ DCHECK_EQ(fifo_index_, fifo_->frames());
+ fifo_index_ = 0;
+
+ // Fill up the FIFO by acquiring audio data from the producer.
+ read_cb_.Run(write_pos, fifo_.get());
+
+ // Try to fulfill the request using what's available in the FIFO.
+ frames_read =
+ ReadFromFifo(destination, remaining_frames_to_provide, write_pos);
+ write_pos += frames_read;
+ remaining_frames_to_provide -= frames_read;
+ }
+}
+
+void AudioPullFifo::Clear() { fifo_index_ = fifo_->frames(); }
+
+int AudioPullFifo::ReadFromFifo(AudioBus* destination,
+ int frames_to_provide,
+ int write_pos) {
+ int frames = std::min(frames_to_provide, fifo_->frames() - fifo_index_);
+ if (frames <= 0)
+ return 0;
+
+ for (int ch = 0; ch < fifo_->channels(); ++ch) {
+ const float* src = fifo_->channel(ch) + fifo_index_;
+ float* dest = destination->channel(ch) + write_pos;
+ memcpy(dest, src, frames * sizeof(*src));
+ }
+
+ fifo_index_ += frames;
+ return frames;
+}
+
+} // namespace media
diff --git a/chromium/media/base/audio_pull_fifo.h b/chromium/media/base/audio_pull_fifo.h
new file mode 100644
index 00000000000..338f9b4cb6c
--- /dev/null
+++ b/chromium/media/base/audio_pull_fifo.h
@@ -0,0 +1,60 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_AUDIO_PULL_FIFO_H_
+#define MEDIA_BASE_AUDIO_PULL_FIFO_H_
+
+#include "base/callback.h"
+#include "media/base/media_export.h"
+
+namespace media {
+class AudioBus;
+
+// A FIFO (First In First Out) buffer to handle mismatches in buffer sizes
+// between a producer and consumer. The consumer will pull data from this FIFO.
+// If data is already available in the FIFO, it is provided to the consumer.
+// If insufficient data is available to satisfy the request, the FIFO will ask
+// the producer for more data to fulfill a request.
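+//
+// Example usage (a sketch; MyRenderer and its ProvideInput() are
+// hypothetical):
+//
+//   AudioPullFifo fifo(2, 2048, base::Bind(&MyRenderer::ProvideInput,
+//                                          base::Unretained(this)));
+//   fifo.Consume(output_bus, 512);  // Pulls from ReadCB only when the FIFO
+//                                   // lacks enough frames.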
+class MEDIA_EXPORT AudioPullFifo {
+ public:
+ // Callback type for providing more data into the FIFO. Expects AudioBus
+ // to be completely filled with data upon return; zero-padded if not enough
+ // frames are available to satisfy the request. |frame_delay| is the number
+ // of output frames already processed and can be used to estimate delay.
+ typedef base::Callback<void(int frame_delay, AudioBus* audio_bus)> ReadCB;
+
+ // Constructs an AudioPullFifo with the specified |read_cb|, which is used to
+ // read audio data to the FIFO if data is not already available. The internal
+ // FIFO contains |channels| channels, where each channel holds |frames|
+ // audio frames.
+ AudioPullFifo(int channels, int frames, const ReadCB& read_cb);
+ virtual ~AudioPullFifo();
+
+ // Consumes |frames_to_consume| audio frames from the FIFO and copies
+ // them to |destination|. If the FIFO does not have enough data, we ask
+ // the producer to give us more data to fulfill the request using the
+ // ReadCB implementation.
+ void Consume(AudioBus* destination, int frames_to_consume);
+
+ // Empties the FIFO without deallocating any memory.
+ void Clear();
+
+ private:
+ // Attempt to fulfill the request using what is available in the FIFO.
+ // Append new data to the |destination| starting at |write_pos|.
+ int ReadFromFifo(AudioBus* destination, int frames_to_provide, int write_pos);
+
+ // Source of data to the FIFO.
+ const ReadCB read_cb_;
+
+ // Temporary audio bus to hold the data from the producer.
+ scoped_ptr<AudioBus> fifo_;
+ int fifo_index_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioPullFifo);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_AUDIO_PULL_FIFO_H_
diff --git a/chromium/media/base/audio_pull_fifo_unittest.cc b/chromium/media/base/audio_pull_fifo_unittest.cc
new file mode 100644
index 00000000000..dc90a6d94f3
--- /dev/null
+++ b/chromium/media/base/audio_pull_fifo_unittest.cc
@@ -0,0 +1,96 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/strings/stringprintf.h"
+#include "media/base/audio_bus.h"
+#include "media/base/audio_pull_fifo.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+// Block diagram of a possible real-world usage:
+//
+// | Producer | ----> | AudioPullFifo | ----> | Consumer |
+// push pull
+// 2048 ----> (2048) ----> ~512
+
+// Number of channels in each audio bus.
+static const int kChannels = 2;
+
+// Max number of audio frames the FIFO can contain.
+static const int kMaxFramesInFifo = 2048;
+
+class AudioPullFifoTest
+ : public testing::TestWithParam<int> {
+ public:
+ AudioPullFifoTest()
+ : pull_fifo_(kChannels, kMaxFramesInFifo, base::Bind(
+ &AudioPullFifoTest::ProvideInput, base::Unretained(this))),
+ audio_bus_(AudioBus::Create(kChannels, kMaxFramesInFifo)),
+ fill_value_(0),
+ last_frame_delay_(-1) {}
+ virtual ~AudioPullFifoTest() {}
+
+ void VerifyValue(const float data[], int size, float start_value) {
+ float value = start_value;
+ for (int i = 0; i < size; ++i) {
+ ASSERT_FLOAT_EQ(value++, data[i]) << "i=" << i;
+ }
+ }
+
+ // Consume data using different sizes, acquiring audio frames from the FIFO,
+ // and verify that the retrieved values match the values written by the
+ // producer.
+ void ConsumeTest(int frames_to_consume) {
+ int start_value = 0;
+ SCOPED_TRACE(base::StringPrintf("Checking frames_to_consume %d",
+ frames_to_consume));
+ pull_fifo_.Consume(audio_bus_.get(), frames_to_consume);
+ for (int j = 0; j < kChannels; ++j) {
+ VerifyValue(audio_bus_->channel(j), frames_to_consume, start_value);
+ }
+ start_value += frames_to_consume;
+ EXPECT_LT(last_frame_delay_, audio_bus_->frames());
+ }
+
+ // AudioPullFifo::ReadCB implementation where we increase a value for each
+ // audio frame that we provide. Note that all channels are given the same
+ // value to simplify the verification.
+ virtual void ProvideInput(int frame_delay, AudioBus* audio_bus) {
+ ASSERT_GT(frame_delay, last_frame_delay_);
+ last_frame_delay_ = frame_delay;
+
+ EXPECT_EQ(audio_bus->channels(), audio_bus_->channels());
+ EXPECT_EQ(audio_bus->frames(), kMaxFramesInFifo);
+ for (int i = 0; i < audio_bus->frames(); ++i) {
+ for (int j = 0; j < audio_bus->channels(); ++j) {
+ // Store same value in all channels.
+ audio_bus->channel(j)[i] = fill_value_;
+ }
+ fill_value_++;
+ }
+ }
+
+ protected:
+ AudioPullFifo pull_fifo_;
+ scoped_ptr<AudioBus> audio_bus_;
+ int fill_value_;
+ int last_frame_delay_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioPullFifoTest);
+};
+
+TEST_P(AudioPullFifoTest, Consume) {
+ ConsumeTest(GetParam());
+}
+
+// Test common |frames_to_consume| values which will be used as input
+// parameter to AudioPullFifo::Consume() when the consumer asks for data.
+INSTANTIATE_TEST_CASE_P(
+ AudioPullFifoTest, AudioPullFifoTest,
+ testing::Values(544, 512, 512, 512, 512, 2048, 544, 441, 440, 433, 500));
+
+} // namespace media
diff --git a/chromium/media/base/audio_renderer.cc b/chromium/media/base/audio_renderer.cc
new file mode 100644
index 00000000000..e7b737ece5e
--- /dev/null
+++ b/chromium/media/base/audio_renderer.cc
@@ -0,0 +1,12 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/audio_renderer.h"
+
+namespace media {
+
+AudioRenderer::AudioRenderer() {}
+AudioRenderer::~AudioRenderer() {}
+
+} // namespace media
diff --git a/chromium/media/base/audio_renderer.h b/chromium/media/base/audio_renderer.h
new file mode 100644
index 00000000000..bcc06b1c4e8
--- /dev/null
+++ b/chromium/media/base/audio_renderer.h
@@ -0,0 +1,92 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_AUDIO_RENDERER_H_
+#define MEDIA_BASE_AUDIO_RENDERER_H_
+
+#include "base/callback.h"
+#include "base/memory/ref_counted.h"
+#include "base/time/time.h"
+#include "media/base/media_export.h"
+#include "media/base/pipeline_status.h"
+
+namespace media {
+
+class DemuxerStream;
+
+class MEDIA_EXPORT AudioRenderer {
+ public:
+  // First parameter is the current time that has been rendered.
+  // Second parameter is the maximum time value that the clock may advance to.
+ typedef base::Callback<void(base::TimeDelta, base::TimeDelta)> TimeCB;
+
+ AudioRenderer();
+ virtual ~AudioRenderer();
+
+ // Initialize an AudioRenderer with |stream|, executing |init_cb| upon
+ // completion.
+ //
+ // |statistics_cb| is executed periodically with audio rendering stats.
+ //
+ // |underflow_cb| is executed when the renderer runs out of data to pass to
+ // the audio card during playback. ResumeAfterUnderflow() must be called
+ // to resume playback. Pause(), Preroll(), or Stop() cancels the underflow
+ // condition.
+ //
+ // |time_cb| is executed whenever time has advanced by way of audio rendering.
+ //
+ // |ended_cb| is executed when audio rendering has reached the end of stream.
+ //
+ // |disabled_cb| is executed when audio rendering has been disabled due to
+  // external factors (e.g., device was removed). |time_cb| will no longer be
+ // executed. TODO(scherkus): this might not be needed http://crbug.com/234708
+ //
+ // |error_cb| is executed if an error was encountered.
+ virtual void Initialize(DemuxerStream* stream,
+ const PipelineStatusCB& init_cb,
+ const StatisticsCB& statistics_cb,
+ const base::Closure& underflow_cb,
+ const TimeCB& time_cb,
+ const base::Closure& ended_cb,
+ const base::Closure& disabled_cb,
+ const PipelineStatusCB& error_cb) = 0;
+
+ // Start audio decoding and rendering at the current playback rate, executing
+ // |callback| when playback is underway.
+ virtual void Play(const base::Closure& callback) = 0;
+
+ // Temporarily suspend decoding and rendering audio, executing |callback| when
+ // playback has been suspended.
+ virtual void Pause(const base::Closure& callback) = 0;
+
+ // Discard any audio data, executing |callback| when completed.
+ virtual void Flush(const base::Closure& callback) = 0;
+
+ // Start prerolling audio data for samples starting at |time|, executing
+ // |callback| when completed.
+ //
+ // Only valid to call after a successful Initialize() or Flush().
+ virtual void Preroll(base::TimeDelta time,
+ const PipelineStatusCB& callback) = 0;
+
+ // Stop all operations in preparation for being deleted, executing |callback|
+ // when complete.
+ virtual void Stop(const base::Closure& callback) = 0;
+
+ // Updates the current playback rate.
+ virtual void SetPlaybackRate(float playback_rate) = 0;
+
+ // Sets the output volume.
+ virtual void SetVolume(float volume) = 0;
+
+ // Resumes playback after underflow occurs.
+ virtual void ResumeAfterUnderflow() = 0;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(AudioRenderer);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_AUDIO_RENDERER_H_
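[Editor's note: a hypothetical driver linearizing the call order the comments above describe. All the OnFoo hooks are made-up placeholders, and a real pipeline would wait for each completion callback before issuing the next call; this sketch only illustrates the sequence.]

#include "base/bind.h"
#include "media/base/audio_renderer.h"

namespace {

// Hypothetical pipeline hooks; a real pipeline wires these to its state
// machine.
void OnStatus(media::PipelineStatus status) {}
void OnStatistics(const media::PipelineStatistics& stats) {}
void OnTime(base::TimeDelta time, base::TimeDelta max_time) {}
void OnClosure() {}

void DriveRenderer(media::AudioRenderer* renderer,
                   media::DemuxerStream* stream) {
  renderer->Initialize(stream,
                       base::Bind(&OnStatus),      // init_cb
                       base::Bind(&OnStatistics),  // statistics_cb
                       base::Bind(&OnClosure),     // underflow_cb
                       base::Bind(&OnTime),        // time_cb
                       base::Bind(&OnClosure),     // ended_cb
                       base::Bind(&OnClosure),     // disabled_cb
                       base::Bind(&OnStatus));     // error_cb
  // Preroll from the start of the stream, then begin playback.
  renderer->Preroll(base::TimeDelta(), base::Bind(&OnStatus));
  renderer->SetPlaybackRate(1.0f);
  renderer->SetVolume(1.0f);
  renderer->Play(base::Bind(&OnClosure));
}

}  // namespace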
diff --git a/chromium/media/base/audio_renderer_mixer.cc b/chromium/media/base/audio_renderer_mixer.cc
new file mode 100644
index 00000000000..11b12110260
--- /dev/null
+++ b/chromium/media/base/audio_renderer_mixer.cc
@@ -0,0 +1,91 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/audio_renderer_mixer.h"
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/logging.h"
+
+namespace media {
+
+enum { kPauseDelaySeconds = 10 };
+
+AudioRendererMixer::AudioRendererMixer(
+ const AudioParameters& input_params, const AudioParameters& output_params,
+ const scoped_refptr<AudioRendererSink>& sink)
+ : audio_sink_(sink),
+ audio_converter_(input_params, output_params, true),
+ pause_delay_(base::TimeDelta::FromSeconds(kPauseDelaySeconds)),
+ last_play_time_(base::TimeTicks::Now()),
+ // Initialize |playing_| to true since Start() results in an auto-play.
+ playing_(true) {
+ audio_sink_->Initialize(output_params, this);
+ audio_sink_->Start();
+}
+
+AudioRendererMixer::~AudioRendererMixer() {
+ // AudioRendererSinks must be stopped before being destructed.
+ audio_sink_->Stop();
+
+ // Ensures that all mixer inputs have stopped themselves prior to destruction
+ // and have called RemoveMixerInput().
+ DCHECK_EQ(mixer_inputs_.size(), 0U);
+}
+
+void AudioRendererMixer::AddMixerInput(AudioConverter::InputCallback* input,
+ const base::Closure& error_cb) {
+ base::AutoLock auto_lock(mixer_inputs_lock_);
+
+ if (!playing_) {
+ playing_ = true;
+ last_play_time_ = base::TimeTicks::Now();
+ audio_sink_->Play();
+ }
+
+ DCHECK(mixer_inputs_.find(input) == mixer_inputs_.end());
+ mixer_inputs_[input] = error_cb;
+ audio_converter_.AddInput(input);
+}
+
+void AudioRendererMixer::RemoveMixerInput(
+ AudioConverter::InputCallback* input) {
+ base::AutoLock auto_lock(mixer_inputs_lock_);
+ audio_converter_.RemoveInput(input);
+
+ DCHECK(mixer_inputs_.find(input) != mixer_inputs_.end());
+ mixer_inputs_.erase(input);
+}
+
+int AudioRendererMixer::Render(AudioBus* audio_bus,
+ int audio_delay_milliseconds) {
+ base::AutoLock auto_lock(mixer_inputs_lock_);
+
+  // If there are no mixer inputs and we haven't seen one for a while, pause
+  // the sink to avoid wasting resources when media elements are present but
+  // remain paused.
+ const base::TimeTicks now = base::TimeTicks::Now();
+ if (!mixer_inputs_.empty()) {
+ last_play_time_ = now;
+ } else if (now - last_play_time_ >= pause_delay_ && playing_) {
+ audio_sink_->Pause();
+ playing_ = false;
+ }
+
+ audio_converter_.ConvertWithDelay(
+ base::TimeDelta::FromMilliseconds(audio_delay_milliseconds), audio_bus);
+ return audio_bus->frames();
+}
+
+void AudioRendererMixer::OnRenderError() {
+ base::AutoLock auto_lock(mixer_inputs_lock_);
+
+ // Call each mixer input and signal an error.
+ for (AudioRendererMixerInputSet::iterator it = mixer_inputs_.begin();
+ it != mixer_inputs_.end(); ++it) {
+ it->second.Run();
+ }
+}
+
+} // namespace media
diff --git a/chromium/media/base/audio_renderer_mixer.h b/chromium/media/base/audio_renderer_mixer.h
new file mode 100644
index 00000000000..942c61fe849
--- /dev/null
+++ b/chromium/media/base/audio_renderer_mixer.h
@@ -0,0 +1,67 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_AUDIO_RENDERER_MIXER_H_
+#define MEDIA_BASE_AUDIO_RENDERER_MIXER_H_
+
+#include <map>
+
+#include "base/synchronization/lock.h"
+#include "base/time/time.h"
+#include "media/base/audio_converter.h"
+#include "media/base/audio_renderer_sink.h"
+
+namespace media {
+
+// Mixes a set of AudioConverter::InputCallbacks into a single output stream,
+// which is funneled into a single shared AudioRendererSink, saving
+// renderer-side resources.
+class MEDIA_EXPORT AudioRendererMixer
+ : NON_EXPORTED_BASE(public AudioRendererSink::RenderCallback) {
+ public:
+ AudioRendererMixer(const AudioParameters& input_params,
+ const AudioParameters& output_params,
+ const scoped_refptr<AudioRendererSink>& sink);
+ virtual ~AudioRendererMixer();
+
+ // Add or remove a mixer input from mixing; called by AudioRendererMixerInput.
+ void AddMixerInput(AudioConverter::InputCallback* input,
+ const base::Closure& error_cb);
+ void RemoveMixerInput(AudioConverter::InputCallback* input);
+
+ void set_pause_delay_for_testing(base::TimeDelta delay) {
+ pause_delay_ = delay;
+ }
+
+ private:
+ // AudioRendererSink::RenderCallback implementation.
+ virtual int Render(AudioBus* audio_bus,
+ int audio_delay_milliseconds) OVERRIDE;
+ virtual void OnRenderError() OVERRIDE;
+
+ // Output sink for this mixer.
+ scoped_refptr<AudioRendererSink> audio_sink_;
+
+  // Set of mixer inputs to be mixed by this mixer. Access is guarded by
+  // |mixer_inputs_lock_|.
+ typedef std::map<AudioConverter::InputCallback*, base::Closure>
+ AudioRendererMixerInputSet;
+ AudioRendererMixerInputSet mixer_inputs_;
+ base::Lock mixer_inputs_lock_;
+
+ // Handles mixing and resampling between input and output parameters.
+ AudioConverter audio_converter_;
+
+ // Handles physical stream pause when no inputs are playing. For latency
+ // reasons we don't want to immediately pause the physical stream.
+ base::TimeDelta pause_delay_;
+ base::TimeTicks last_play_time_;
+ bool playing_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioRendererMixer);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_AUDIO_RENDERER_MIXER_H_
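[Editor's note: a minimal sketch of the ownership rules stated above, assuming only this header. The mixer starts its sink on construction, and every AddMixerInput() must be balanced by a RemoveMixerInput() before the mixer is destroyed (the destructor DCHECKs an empty input set). OnMixerError() is a hypothetical handler.]

#include "base/bind.h"
#include "base/memory/ref_counted.h"
#include "media/base/audio_renderer_mixer.h"

namespace {

void OnMixerError() {}  // Hypothetical per-input error handler.

void MixOneInput(const media::AudioParameters& input_params,
                 const media::AudioParameters& output_params,
                 const scoped_refptr<media::AudioRendererSink>& sink,
                 media::AudioConverter::InputCallback* input) {
  media::AudioRendererMixer mixer(input_params, output_params, sink);
  mixer.AddMixerInput(input, base::Bind(&OnMixerError));
  // ... the sink pulls mixed data from |input| while it is attached ...
  mixer.RemoveMixerInput(input);  // Must precede ~AudioRendererMixer().
}

}  // namespace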
diff --git a/chromium/media/base/audio_renderer_mixer_input.cc b/chromium/media/base/audio_renderer_mixer_input.cc
new file mode 100644
index 00000000000..ffdcfa875f7
--- /dev/null
+++ b/chromium/media/base/audio_renderer_mixer_input.cc
@@ -0,0 +1,100 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/audio_renderer_mixer_input.h"
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "media/base/audio_renderer_mixer.h"
+
+namespace media {
+
+AudioRendererMixerInput::AudioRendererMixerInput(
+ const GetMixerCB& get_mixer_cb, const RemoveMixerCB& remove_mixer_cb)
+ : playing_(false),
+ initialized_(false),
+ volume_(1.0f),
+ get_mixer_cb_(get_mixer_cb),
+ remove_mixer_cb_(remove_mixer_cb),
+ mixer_(NULL),
+ callback_(NULL),
+ error_cb_(base::Bind(
+ &AudioRendererMixerInput::OnRenderError, base::Unretained(this))) {
+}
+
+AudioRendererMixerInput::~AudioRendererMixerInput() {
+ // Mixer is no longer safe to use after |remove_mixer_cb_| has been called.
+ if (initialized_)
+ remove_mixer_cb_.Run(params_);
+}
+
+void AudioRendererMixerInput::Initialize(
+ const AudioParameters& params,
+ AudioRendererSink::RenderCallback* callback) {
+ DCHECK(!initialized_);
+ params_ = params;
+ mixer_ = get_mixer_cb_.Run(params_);
+ callback_ = callback;
+ initialized_ = true;
+}
+
+void AudioRendererMixerInput::Start() {
+ DCHECK(initialized_);
+ DCHECK(!playing_);
+}
+
+void AudioRendererMixerInput::Stop() {
+  // Stop() may be called at any time; if Pause() hasn't been called, we need
+  // to remove our mixer input before shutdown.
+ if (!playing_)
+ return;
+
+ mixer_->RemoveMixerInput(this);
+ playing_ = false;
+}
+
+void AudioRendererMixerInput::Play() {
+ DCHECK(initialized_);
+
+ if (playing_)
+ return;
+
+ mixer_->AddMixerInput(this, error_cb_);
+ playing_ = true;
+}
+
+void AudioRendererMixerInput::Pause() {
+ DCHECK(initialized_);
+
+ if (!playing_)
+ return;
+
+ mixer_->RemoveMixerInput(this);
+ playing_ = false;
+}
+
+bool AudioRendererMixerInput::SetVolume(double volume) {
+ volume_ = volume;
+ return true;
+}
+
+double AudioRendererMixerInput::ProvideInput(AudioBus* audio_bus,
+ base::TimeDelta buffer_delay) {
+ int frames_filled = callback_->Render(
+ audio_bus, static_cast<int>(buffer_delay.InMillisecondsF() + 0.5));
+
+ // AudioConverter expects unfilled frames to be zeroed.
+ if (frames_filled < audio_bus->frames()) {
+ audio_bus->ZeroFramesPartial(
+ frames_filled, audio_bus->frames() - frames_filled);
+ }
+
+ return frames_filled > 0 ? volume_ : 0;
+}
+
+void AudioRendererMixerInput::OnRenderError() {
+ callback_->OnRenderError();
+}
+
+} // namespace media
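[Editor's note: the contract ProvideInput() implements above generalizes to any AudioConverter input — fill what you can, zero the rest, and return the volume scale (0 means "treat me as silent"). A toy callback following that contract, under the same assumptions, might look like this.]

#include <algorithm>

#include "base/compiler_specific.h"
#include "media/base/audio_converter.h"

namespace {

class ConstantInput : public media::AudioConverter::InputCallback {
 public:
  explicit ConstantInput(double volume) : volume_(volume) {}

  virtual double ProvideInput(media::AudioBus* audio_bus,
                              base::TimeDelta buffer_delay) OVERRIDE {
    // Fill every channel completely, so no partial zeroing is needed.
    for (int ch = 0; ch < audio_bus->channels(); ++ch) {
      std::fill(audio_bus->channel(ch),
                audio_bus->channel(ch) + audio_bus->frames(), 0.5f);
    }
    return volume_;  // The converter scales our samples by this factor.
  }

 private:
  double volume_;
};

}  // namespace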
diff --git a/chromium/media/base/audio_renderer_mixer_input.h b/chromium/media/base/audio_renderer_mixer_input.h
new file mode 100644
index 00000000000..6b026cf9c29
--- /dev/null
+++ b/chromium/media/base/audio_renderer_mixer_input.h
@@ -0,0 +1,78 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_AUDIO_RENDERER_MIXER_INPUT_H_
+#define MEDIA_BASE_AUDIO_RENDERER_MIXER_INPUT_H_
+
+#include <vector>
+
+#include "base/callback.h"
+#include "media/base/audio_converter.h"
+#include "media/base/audio_renderer_sink.h"
+
+namespace media {
+
+class AudioRendererMixer;
+
+class MEDIA_EXPORT AudioRendererMixerInput
+ : NON_EXPORTED_BASE(public AudioRendererSink),
+ public AudioConverter::InputCallback {
+ public:
+ typedef base::Callback<AudioRendererMixer*(
+ const AudioParameters& params)> GetMixerCB;
+ typedef base::Callback<void(const AudioParameters& params)> RemoveMixerCB;
+
+ AudioRendererMixerInput(
+ const GetMixerCB& get_mixer_cb, const RemoveMixerCB& remove_mixer_cb);
+
+ // AudioRendererSink implementation.
+ virtual void Start() OVERRIDE;
+ virtual void Stop() OVERRIDE;
+ virtual void Play() OVERRIDE;
+ virtual void Pause() OVERRIDE;
+ virtual bool SetVolume(double volume) OVERRIDE;
+ virtual void Initialize(const AudioParameters& params,
+ AudioRendererSink::RenderCallback* renderer) OVERRIDE;
+
+ // Called by AudioRendererMixer when an error occurs.
+ void OnRenderError();
+
+ protected:
+ virtual ~AudioRendererMixerInput();
+
+ private:
+ friend class AudioRendererMixerInputTest;
+
+ bool playing_;
+ bool initialized_;
+ double volume_;
+
+ // AudioConverter::InputCallback implementation.
+ virtual double ProvideInput(AudioBus* audio_bus,
+ base::TimeDelta buffer_delay) OVERRIDE;
+
+ // Callbacks provided during construction which allow AudioRendererMixerInput
+ // to retrieve a mixer during Initialize() and notify when it's done with it.
+ GetMixerCB get_mixer_cb_;
+ RemoveMixerCB remove_mixer_cb_;
+
+ // AudioParameters received during Initialize().
+ AudioParameters params_;
+
+ // AudioRendererMixer provided through |get_mixer_cb_| during Initialize(),
+ // guaranteed to live (at least) until |remove_mixer_cb_| is called.
+ AudioRendererMixer* mixer_;
+
+ // Source of audio data which is provided to the mixer.
+ AudioRendererSink::RenderCallback* callback_;
+
+ // Error callback for handing to AudioRendererMixer.
+ base::Closure error_cb_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioRendererMixerInput);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_AUDIO_RENDERER_MIXER_INPUT_H_
diff --git a/chromium/media/base/audio_renderer_mixer_input_unittest.cc b/chromium/media/base/audio_renderer_mixer_input_unittest.cc
new file mode 100644
index 00000000000..9a019db5717
--- /dev/null
+++ b/chromium/media/base/audio_renderer_mixer_input_unittest.cc
@@ -0,0 +1,112 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "media/base/audio_renderer_mixer.h"
+#include "media/base/audio_renderer_mixer_input.h"
+#include "media/base/fake_audio_render_callback.h"
+#include "media/base/mock_audio_renderer_sink.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+static const int kBitsPerChannel = 16;
+static const int kSampleRate = 48000;
+static const int kBufferSize = 8192;
+static const ChannelLayout kChannelLayout = CHANNEL_LAYOUT_STEREO;
+
+class AudioRendererMixerInputTest : public testing::Test {
+ public:
+ AudioRendererMixerInputTest() {
+ audio_parameters_ = AudioParameters(
+ AudioParameters::AUDIO_PCM_LINEAR, kChannelLayout, kSampleRate,
+ kBitsPerChannel, kBufferSize);
+
+ CreateMixerInput();
+ fake_callback_.reset(new FakeAudioRenderCallback(0));
+ mixer_input_->Initialize(audio_parameters_, fake_callback_.get());
+ EXPECT_CALL(*this, RemoveMixer(testing::_));
+ audio_bus_ = AudioBus::Create(audio_parameters_);
+ }
+
+ void CreateMixerInput() {
+ mixer_input_ = new AudioRendererMixerInput(
+ base::Bind(
+ &AudioRendererMixerInputTest::GetMixer, base::Unretained(this)),
+ base::Bind(
+ &AudioRendererMixerInputTest::RemoveMixer, base::Unretained(this)));
+ }
+
+ AudioRendererMixer* GetMixer(const AudioParameters& params) {
+ if (!mixer_) {
+ scoped_refptr<MockAudioRendererSink> sink = new MockAudioRendererSink();
+ EXPECT_CALL(*sink.get(), Start());
+ EXPECT_CALL(*sink.get(), Stop());
+
+ mixer_.reset(new AudioRendererMixer(
+ audio_parameters_, audio_parameters_, sink));
+ }
+ return mixer_.get();
+ }
+
+ double ProvideInput() {
+ return mixer_input_->ProvideInput(audio_bus_.get(), base::TimeDelta());
+ }
+
+ MOCK_METHOD1(RemoveMixer, void(const AudioParameters&));
+
+ protected:
+ virtual ~AudioRendererMixerInputTest() {}
+
+ AudioParameters audio_parameters_;
+ scoped_ptr<AudioRendererMixer> mixer_;
+ scoped_refptr<AudioRendererMixerInput> mixer_input_;
+ scoped_ptr<FakeAudioRenderCallback> fake_callback_;
+ scoped_ptr<AudioBus> audio_bus_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioRendererMixerInputTest);
+};
+
+// Test that getting and setting the volume work as expected. The volume is
+// returned from ProvideInput() only when playing.
+TEST_F(AudioRendererMixerInputTest, GetSetVolume) {
+ mixer_input_->Start();
+ mixer_input_->Play();
+
+ // Starting volume should be 1.0.
+ EXPECT_DOUBLE_EQ(ProvideInput(), 1);
+
+ const double kVolume = 0.5;
+ EXPECT_TRUE(mixer_input_->SetVolume(kVolume));
+ EXPECT_DOUBLE_EQ(ProvideInput(), kVolume);
+
+ mixer_input_->Stop();
+}
+
+// Test Start()/Play()/Pause()/Stop()/playing() all work as expected. Also
+// implicitly tests that AddMixerInput() and RemoveMixerInput() work without
+// crashing; functional tests for these methods are in AudioRendererMixerTest.
+TEST_F(AudioRendererMixerInputTest, StartPlayPauseStopPlaying) {
+ mixer_input_->Start();
+ mixer_input_->Play();
+ EXPECT_DOUBLE_EQ(ProvideInput(), 1);
+ mixer_input_->Pause();
+ mixer_input_->Play();
+ EXPECT_DOUBLE_EQ(ProvideInput(), 1);
+ mixer_input_->Stop();
+}
+
+// Test that Stop() can be called before Initialize() and Start().
+TEST_F(AudioRendererMixerInputTest, StopBeforeInitializeOrStart) {
+ // |mixer_input_| was initialized during construction.
+ mixer_input_->Stop();
+
+ // Verify Stop() works without Initialize() or Start().
+ CreateMixerInput();
+ mixer_input_->Stop();
+}
+
+} // namespace media
diff --git a/chromium/media/base/audio_renderer_mixer_unittest.cc b/chromium/media/base/audio_renderer_mixer_unittest.cc
new file mode 100644
index 00000000000..8853068335c
--- /dev/null
+++ b/chromium/media/base/audio_renderer_mixer_unittest.cc
@@ -0,0 +1,468 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// MSVC++ requires this to be set before any other includes to get M_PI.
+#define _USE_MATH_DEFINES
+#include <cmath>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/scoped_vector.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/platform_thread.h"
+#include "media/base/audio_renderer_mixer.h"
+#include "media/base/audio_renderer_mixer_input.h"
+#include "media/base/fake_audio_render_callback.h"
+#include "media/base/mock_audio_renderer_sink.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+// Parameters which control the many input case tests.
+static const int kMixerInputs = 8;
+static const int kMixerCycles = 3;
+
+// Parameters used for testing.
+static const int kBitsPerChannel = 32;
+static const ChannelLayout kChannelLayout = CHANNEL_LAYOUT_STEREO;
+static const int kHighLatencyBufferSize = 8192;
+static const int kLowLatencyBufferSize = 256;
+static const int kSampleRate = 48000;
+
+// Number of full sine wave cycles for each Render() call.
+static const int kSineCycles = 4;
+
+// Tuple of <input sampling rate, output sampling rate, epsilon>.
+typedef std::tr1::tuple<int, int, double> AudioRendererMixerTestData;
+class AudioRendererMixerTest
+ : public testing::TestWithParam<AudioRendererMixerTestData> {
+ public:
+ AudioRendererMixerTest()
+ : epsilon_(std::tr1::get<2>(GetParam())),
+ half_fill_(false) {
+ // Create input and output parameters based on test parameters.
+ input_parameters_ = AudioParameters(
+ AudioParameters::AUDIO_PCM_LINEAR, kChannelLayout,
+ std::tr1::get<0>(GetParam()), kBitsPerChannel, kHighLatencyBufferSize);
+ output_parameters_ = AudioParameters(
+ AudioParameters::AUDIO_PCM_LOW_LATENCY, kChannelLayout,
+ std::tr1::get<1>(GetParam()), 16, kLowLatencyBufferSize);
+
+ sink_ = new MockAudioRendererSink();
+ EXPECT_CALL(*sink_.get(), Start());
+ EXPECT_CALL(*sink_.get(), Stop());
+
+ mixer_.reset(new AudioRendererMixer(
+ input_parameters_, output_parameters_, sink_));
+ mixer_callback_ = sink_->callback();
+
+ audio_bus_ = AudioBus::Create(output_parameters_);
+ expected_audio_bus_ = AudioBus::Create(output_parameters_);
+
+ // Allocate one callback for generating expected results.
+ double step = kSineCycles / static_cast<double>(
+ output_parameters_.frames_per_buffer());
+ expected_callback_.reset(new FakeAudioRenderCallback(step));
+ }
+
+ AudioRendererMixer* GetMixer(const AudioParameters& params) {
+ return mixer_.get();
+ }
+
+ MOCK_METHOD1(RemoveMixer, void(const AudioParameters&));
+
+ void InitializeInputs(int count) {
+ mixer_inputs_.reserve(count);
+ fake_callbacks_.reserve(count);
+
+    // Set up the FakeAudioRenderCallback step to compensate for resampling.
+ double scale_factor = input_parameters_.sample_rate() /
+ static_cast<double>(output_parameters_.sample_rate());
+ double step = kSineCycles / (scale_factor *
+ static_cast<double>(output_parameters_.frames_per_buffer()));
+
+ for (int i = 0; i < count; ++i) {
+ fake_callbacks_.push_back(new FakeAudioRenderCallback(step));
+ mixer_inputs_.push_back(new AudioRendererMixerInput(
+ base::Bind(&AudioRendererMixerTest::GetMixer,
+ base::Unretained(this)),
+ base::Bind(&AudioRendererMixerTest::RemoveMixer,
+ base::Unretained(this))));
+ mixer_inputs_[i]->Initialize(input_parameters_, fake_callbacks_[i]);
+ mixer_inputs_[i]->SetVolume(1.0f);
+ }
+ EXPECT_CALL(*this, RemoveMixer(testing::_)).Times(count);
+ }
+
+ bool ValidateAudioData(int index, int frames, float scale, double epsilon) {
+ for (int i = 0; i < audio_bus_->channels(); ++i) {
+ for (int j = index; j < frames; j++) {
+ double error = fabs(audio_bus_->channel(i)[j] -
+ expected_audio_bus_->channel(i)[j] * scale);
+ if (error > epsilon) {
+ EXPECT_NEAR(expected_audio_bus_->channel(i)[j] * scale,
+ audio_bus_->channel(i)[j], epsilon)
+ << " i=" << i << ", j=" << j;
+ return false;
+ }
+ }
+ }
+ return true;
+ }
+
+ bool ValidateAudioData(int index, int frames, float scale) {
+ return ValidateAudioData(index, frames, scale, epsilon_);
+ }
+
+ bool RenderAndValidateAudioData(float scale) {
+ if (half_fill_) {
+ for (size_t i = 0; i < fake_callbacks_.size(); ++i)
+ fake_callbacks_[i]->set_half_fill(true);
+ expected_callback_->set_half_fill(true);
+ // Initialize the AudioBus completely or we'll run into Valgrind problems
+ // during the verification step below.
+ expected_audio_bus_->Zero();
+ }
+
+ // Render actual audio data.
+ int frames = mixer_callback_->Render(audio_bus_.get(), 0);
+ if (frames != audio_bus_->frames())
+ return false;
+
+ // Render expected audio data (without scaling).
+ expected_callback_->Render(expected_audio_bus_.get(), 0);
+
+ if (half_fill_) {
+      // In this case, just verify that every frame was initialized; this
+      // will only fail under tooling such as Valgrind.
+ return ValidateAudioData(
+ 0, frames, 0, std::numeric_limits<double>::max());
+ } else {
+ return ValidateAudioData(0, frames, scale);
+ }
+ }
+
+ // Fill |audio_bus_| fully with |value|.
+ void FillAudioData(float value) {
+ for (int i = 0; i < audio_bus_->channels(); ++i) {
+ std::fill(audio_bus_->channel(i),
+ audio_bus_->channel(i) + audio_bus_->frames(), value);
+ }
+ }
+
+  // Verify silence when mixer inputs are in the pre-Start() and
+  // post-Start() states.
+ void StartTest(int inputs) {
+ InitializeInputs(inputs);
+
+    // Verify silence before any inputs have been started. Fill the buffer
+    // beforehand with non-zero data to ensure we get zeros back.
+ FillAudioData(1.0f);
+ EXPECT_TRUE(RenderAndValidateAudioData(0.0f));
+
+ // Start() all even numbered mixer inputs and ensure we still get silence.
+ for (size_t i = 0; i < mixer_inputs_.size(); i += 2)
+ mixer_inputs_[i]->Start();
+ FillAudioData(1.0f);
+ EXPECT_TRUE(RenderAndValidateAudioData(0.0f));
+
+    // Start() the remaining odd numbered mixer inputs and ensure we still
+    // get silence.
+ for (size_t i = 1; i < mixer_inputs_.size(); i += 2)
+ mixer_inputs_[i]->Start();
+ FillAudioData(1.0f);
+ EXPECT_TRUE(RenderAndValidateAudioData(0.0f));
+
+ for (size_t i = 0; i < mixer_inputs_.size(); ++i)
+ mixer_inputs_[i]->Stop();
+ }
+
+ // Verify output when mixer inputs are in post-Play() state.
+ void PlayTest(int inputs) {
+ InitializeInputs(inputs);
+
+ // Play() all mixer inputs and ensure we get the right values.
+ for (size_t i = 0; i < mixer_inputs_.size(); ++i) {
+ mixer_inputs_[i]->Start();
+ mixer_inputs_[i]->Play();
+ }
+
+ for (int i = 0; i < kMixerCycles; ++i)
+ ASSERT_TRUE(RenderAndValidateAudioData(mixer_inputs_.size()));
+
+ for (size_t i = 0; i < mixer_inputs_.size(); ++i)
+ mixer_inputs_[i]->Stop();
+ }
+
+ // Verify volume adjusted output when mixer inputs are in post-Play() state.
+ void PlayVolumeAdjustedTest(int inputs) {
+ InitializeInputs(inputs);
+
+ for (size_t i = 0; i < mixer_inputs_.size(); ++i) {
+ mixer_inputs_[i]->Start();
+ mixer_inputs_[i]->Play();
+ }
+
+ // Set a different volume for each mixer input and verify the results.
+ float total_scale = 0;
+ for (size_t i = 0; i < mixer_inputs_.size(); ++i) {
+ float volume = static_cast<float>(i) / mixer_inputs_.size();
+ total_scale += volume;
+ EXPECT_TRUE(mixer_inputs_[i]->SetVolume(volume));
+ }
+ for (int i = 0; i < kMixerCycles; ++i)
+ ASSERT_TRUE(RenderAndValidateAudioData(total_scale));
+
+ for (size_t i = 0; i < mixer_inputs_.size(); ++i)
+ mixer_inputs_[i]->Stop();
+ }
+
+ // Verify output when mixer inputs can only partially fulfill a Render().
+ void PlayPartialRenderTest(int inputs) {
+ InitializeInputs(inputs);
+
+ for (size_t i = 0; i < mixer_inputs_.size(); ++i) {
+ mixer_inputs_[i]->Start();
+ mixer_inputs_[i]->Play();
+ }
+
+ // Verify a properly filled buffer when half filled (remainder zeroed).
+ half_fill_ = true;
+ ASSERT_TRUE(RenderAndValidateAudioData(mixer_inputs_.size()));
+
+ for (size_t i = 0; i < mixer_inputs_.size(); ++i)
+ mixer_inputs_[i]->Stop();
+ }
+
+ // Verify output when mixer inputs are in Pause() state.
+ void PauseTest(int inputs) {
+ InitializeInputs(inputs);
+
+ for (size_t i = 0; i < mixer_inputs_.size(); ++i) {
+ mixer_inputs_[i]->Start();
+ mixer_inputs_[i]->Play();
+ }
+
+ // Pause() all even numbered mixer inputs and ensure we get the right value.
+ for (size_t i = 0; i < mixer_inputs_.size(); i += 2)
+ mixer_inputs_[i]->Pause();
+ for (int i = 0; i < kMixerCycles; ++i)
+ ASSERT_TRUE(RenderAndValidateAudioData(mixer_inputs_.size() / 2));
+
+ for (size_t i = 0; i < mixer_inputs_.size(); ++i)
+ mixer_inputs_[i]->Stop();
+ }
+
+ // Verify output when mixer inputs are in post-Stop() state.
+ void StopTest(int inputs) {
+ InitializeInputs(inputs);
+
+ // Start() and Stop() all inputs.
+ for (size_t i = 0; i < mixer_inputs_.size(); ++i) {
+ mixer_inputs_[i]->Start();
+ mixer_inputs_[i]->Stop();
+ }
+
+    // Verify we get silence back; fill |audio_bus_| beforehand to be sure.
+ FillAudioData(1.0f);
+ EXPECT_TRUE(RenderAndValidateAudioData(0.0f));
+ }
+
+ protected:
+ virtual ~AudioRendererMixerTest() {}
+
+ scoped_refptr<MockAudioRendererSink> sink_;
+ scoped_ptr<AudioRendererMixer> mixer_;
+ AudioRendererSink::RenderCallback* mixer_callback_;
+ AudioParameters input_parameters_;
+ AudioParameters output_parameters_;
+ scoped_ptr<AudioBus> audio_bus_;
+ scoped_ptr<AudioBus> expected_audio_bus_;
+ std::vector< scoped_refptr<AudioRendererMixerInput> > mixer_inputs_;
+ ScopedVector<FakeAudioRenderCallback> fake_callbacks_;
+ scoped_ptr<FakeAudioRenderCallback> expected_callback_;
+ double epsilon_;
+ bool half_fill_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioRendererMixerTest);
+};
+
+class AudioRendererMixerBehavioralTest : public AudioRendererMixerTest {};
+
+ACTION_P(SignalEvent, event) {
+ event->Signal();
+}
+
+// Verify a mixer with no inputs returns silence for all requested frames.
+TEST_P(AudioRendererMixerTest, NoInputs) {
+ FillAudioData(1.0f);
+ EXPECT_TRUE(RenderAndValidateAudioData(0.0f));
+}
+
+// Test mixer output with one input in the pre-Start() and post-Start() state.
+TEST_P(AudioRendererMixerTest, OneInputStart) {
+ StartTest(1);
+}
+
+// Test mixer output with many inputs in the pre-Start() and post-Start() state.
+TEST_P(AudioRendererMixerTest, ManyInputStart) {
+ StartTest(kMixerInputs);
+}
+
+// Test mixer output with one input in the post-Play() state.
+TEST_P(AudioRendererMixerTest, OneInputPlay) {
+ PlayTest(1);
+}
+
+// Test mixer output with many inputs in the post-Play() state.
+TEST_P(AudioRendererMixerTest, ManyInputPlay) {
+ PlayTest(kMixerInputs);
+}
+
+// Test volume adjusted mixer output with one input in the post-Play() state.
+TEST_P(AudioRendererMixerTest, OneInputPlayVolumeAdjusted) {
+ PlayVolumeAdjustedTest(1);
+}
+
+// Test volume adjusted mixer output with many inputs in the post-Play() state.
+TEST_P(AudioRendererMixerTest, ManyInputPlayVolumeAdjusted) {
+ PlayVolumeAdjustedTest(kMixerInputs);
+}
+
+// Test mixer output with one input and partial Render() in post-Play() state.
+TEST_P(AudioRendererMixerTest, OneInputPlayPartialRender) {
+ PlayPartialRenderTest(1);
+}
+
+// Test mixer output with many inputs and partial Render() in post-Play() state.
+TEST_P(AudioRendererMixerTest, ManyInputPlayPartialRender) {
+ PlayPartialRenderTest(kMixerInputs);
+}
+
+// Test mixer output with one input in the post-Pause() state.
+TEST_P(AudioRendererMixerTest, OneInputPause) {
+ PauseTest(1);
+}
+
+// Test mixer output with many inputs in the post-Pause() state.
+TEST_P(AudioRendererMixerTest, ManyInputPause) {
+ PauseTest(kMixerInputs);
+}
+
+// Test mixer output with one input in the post-Stop() state.
+TEST_P(AudioRendererMixerTest, OneInputStop) {
+ StopTest(1);
+}
+
+// Test mixer output with many inputs in the post-Stop() state.
+TEST_P(AudioRendererMixerTest, ManyInputStop) {
+ StopTest(kMixerInputs);
+}
+
+// Test mixer with many inputs in mixed post-Stop() and post-Play() states.
+TEST_P(AudioRendererMixerTest, ManyInputMixedStopPlay) {
+ InitializeInputs(kMixerInputs);
+
+ // Start() all inputs.
+ for (size_t i = 0; i < mixer_inputs_.size(); ++i)
+ mixer_inputs_[i]->Start();
+
+ // Stop() all even numbered mixer inputs and Play() all odd numbered inputs
+ // and ensure we get the right value.
+ for (size_t i = 1; i < mixer_inputs_.size(); i += 2) {
+ mixer_inputs_[i - 1]->Stop();
+ mixer_inputs_[i]->Play();
+ }
+ ASSERT_TRUE(RenderAndValidateAudioData(std::max(
+ mixer_inputs_.size() / 2, static_cast<size_t>(1))));
+
+ for (size_t i = 1; i < mixer_inputs_.size(); i += 2)
+ mixer_inputs_[i]->Stop();
+}
+
+TEST_P(AudioRendererMixerBehavioralTest, OnRenderError) {
+ InitializeInputs(kMixerInputs);
+ for (size_t i = 0; i < mixer_inputs_.size(); ++i) {
+ mixer_inputs_[i]->Start();
+ mixer_inputs_[i]->Play();
+ EXPECT_CALL(*fake_callbacks_[i], OnRenderError()).Times(1);
+ }
+
+ mixer_callback_->OnRenderError();
+ for (size_t i = 0; i < mixer_inputs_.size(); ++i)
+ mixer_inputs_[i]->Stop();
+}
+
+// Ensure that constructing an AudioRendererMixerInput without initializing
+// it does not call RemoveMixer().
+TEST_P(AudioRendererMixerBehavioralTest, NoInitialize) {
+ EXPECT_CALL(*this, RemoveMixer(testing::_)).Times(0);
+ scoped_refptr<AudioRendererMixerInput> audio_renderer_mixer =
+ new AudioRendererMixerInput(
+ base::Bind(&AudioRendererMixerTest::GetMixer,
+ base::Unretained(this)),
+ base::Bind(&AudioRendererMixerTest::RemoveMixer,
+ base::Unretained(this)));
+}
+
+// Ensure the physical stream is paused after a certain amount of time with no
+// inputs playing. The test will hang if the behavior is incorrect.
+TEST_P(AudioRendererMixerBehavioralTest, MixerPausesStream) {
+ const base::TimeDelta kPauseTime = base::TimeDelta::FromMilliseconds(500);
+  // This value can't be too low or Valgrind/TSan runs will time out on
+  // the bots.
+ const base::TimeDelta kTestTimeout = 10 * kPauseTime;
+ mixer_->set_pause_delay_for_testing(kPauseTime);
+
+ base::WaitableEvent pause_event(true, false);
+ EXPECT_CALL(*sink_.get(), Pause()).Times(2)
+ .WillRepeatedly(SignalEvent(&pause_event));
+ InitializeInputs(1);
+
+ // Ensure never playing the input results in a sink pause.
+ const base::TimeDelta kSleepTime = base::TimeDelta::FromMilliseconds(100);
+ base::TimeTicks start_time = base::TimeTicks::Now();
+ while (!pause_event.IsSignaled()) {
+ mixer_callback_->Render(audio_bus_.get(), 0);
+ base::PlatformThread::Sleep(kSleepTime);
+ ASSERT_TRUE(base::TimeTicks::Now() - start_time < kTestTimeout);
+ }
+ pause_event.Reset();
+
+ // Playing the input for the first time should cause a sink play.
+ mixer_inputs_[0]->Start();
+ EXPECT_CALL(*sink_.get(), Play());
+ mixer_inputs_[0]->Play();
+ mixer_inputs_[0]->Pause();
+
+ // Ensure once the input is paused the sink eventually pauses.
+ start_time = base::TimeTicks::Now();
+ while (!pause_event.IsSignaled()) {
+ mixer_callback_->Render(audio_bus_.get(), 0);
+ base::PlatformThread::Sleep(kSleepTime);
+ ASSERT_TRUE(base::TimeTicks::Now() - start_time < kTestTimeout);
+ }
+
+ mixer_inputs_[0]->Stop();
+}
+
+INSTANTIATE_TEST_CASE_P(
+ AudioRendererMixerTest, AudioRendererMixerTest, testing::Values(
+ // No resampling.
+ std::tr1::make_tuple(44100, 44100, 0.00000048),
+
+ // Upsampling.
+ std::tr1::make_tuple(44100, 48000, 0.033),
+
+ // Downsampling.
+ std::tr1::make_tuple(48000, 41000, 0.042)));
+
+// Test cases for behavior which is independent of parameters. Values() doesn't
+// support single item lists and we don't want these test cases to run for every
+// parameter set.
+INSTANTIATE_TEST_CASE_P(
+ AudioRendererMixerBehavioralTest, AudioRendererMixerBehavioralTest,
+ testing::ValuesIn(std::vector<AudioRendererMixerTestData>(
+ 1, std::tr1::make_tuple(44100, 44100, 0))));
+
+} // namespace media
diff --git a/chromium/media/base/audio_renderer_sink.h b/chromium/media/base/audio_renderer_sink.h
new file mode 100644
index 00000000000..b2f4ba0a902
--- /dev/null
+++ b/chromium/media/base/audio_renderer_sink.h
@@ -0,0 +1,71 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_AUDIO_RENDERER_SINK_H_
+#define MEDIA_BASE_AUDIO_RENDERER_SINK_H_
+
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+#include "media/audio/audio_parameters.h"
+#include "media/base/audio_bus.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+// AudioRendererSink is an interface representing the endpoint for rendered
+// audio. An implementation is expected to periodically call Render() on a
+// callback object.
+
+class AudioRendererSink
+ : public base::RefCountedThreadSafe<media::AudioRendererSink> {
+ public:
+ class RenderCallback {
+ public:
+ // Attempts to completely fill all channels of |dest|, returns actual
+ // number of frames filled.
+ virtual int Render(AudioBus* dest, int audio_delay_milliseconds) = 0;
+
+    // Synchronized audio I/O: implementations may consume input samples
+    // from |source| while rendering output into |dest|.
+ virtual void RenderIO(AudioBus* source,
+ AudioBus* dest,
+ int audio_delay_milliseconds) {}
+
+ // Signals an error has occurred.
+ virtual void OnRenderError() = 0;
+
+ protected:
+ virtual ~RenderCallback() {}
+ };
+
+ // Sets important information about the audio stream format.
+ // It must be called before any of the other methods.
+ virtual void Initialize(const AudioParameters& params,
+ RenderCallback* callback) = 0;
+
+ // Starts audio playback.
+ virtual void Start() = 0;
+
+ // Stops audio playback.
+ virtual void Stop() = 0;
+
+ // Pauses playback.
+ virtual void Pause() = 0;
+
+ // Resumes playback after calling Pause().
+ virtual void Play() = 0;
+
+ // Sets the playback volume, with range [0.0, 1.0] inclusive.
+ // Returns |true| on success.
+ virtual bool SetVolume(double volume) = 0;
+
+ protected:
+ friend class base::RefCountedThreadSafe<AudioRendererSink>;
+ virtual ~AudioRendererSink() {}
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_AUDIO_RENDERER_SINK_H_
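[Editor's note: a minimal RenderCallback sketch against the interface above — report every requested frame as filled (with silence) and log render errors. Purely illustrative.]

#include "base/compiler_specific.h"
#include "base/logging.h"
#include "media/base/audio_renderer_sink.h"

namespace {

class SilentSource : public media::AudioRendererSink::RenderCallback {
 public:
  virtual int Render(media::AudioBus* dest,
                     int audio_delay_milliseconds) OVERRIDE {
    dest->Zero();
    return dest->frames();  // All requested frames were filled.
  }

  virtual void OnRenderError() OVERRIDE {
    LOG(ERROR) << "Audio render error.";
  }
};

}  // namespace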
diff --git a/chromium/media/base/audio_splicer.cc b/chromium/media/base/audio_splicer.cc
new file mode 100644
index 00000000000..14b4199e0e3
--- /dev/null
+++ b/chromium/media/base/audio_splicer.cc
@@ -0,0 +1,130 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/audio_splicer.h"
+
+#include <cstdlib>
+
+#include "base/logging.h"
+#include "media/base/audio_buffer.h"
+#include "media/base/audio_decoder_config.h"
+#include "media/base/audio_timestamp_helper.h"
+#include "media/base/buffers.h"
+
+namespace media {
+
+// Largest gap or overlap allowed by this class. Anything
+// larger than this will trigger an error.
+// This is an arbitrary value, but the initial selection of 50ms
+// roughly represents the duration of 2 compressed AAC or MP3 frames.
+static const int kMaxTimeDeltaInMilliseconds = 50;
+
+AudioSplicer::AudioSplicer(int samples_per_second)
+ : output_timestamp_helper_(samples_per_second),
+ min_gap_size_(2),
+ received_end_of_stream_(false) {
+}
+
+AudioSplicer::~AudioSplicer() {
+}
+
+void AudioSplicer::Reset() {
+ output_timestamp_helper_.SetBaseTimestamp(kNoTimestamp());
+ output_buffers_.clear();
+ received_end_of_stream_ = false;
+}
+
+bool AudioSplicer::AddInput(const scoped_refptr<AudioBuffer>& input) {
+ DCHECK(!received_end_of_stream_ || input->end_of_stream());
+
+ if (input->end_of_stream()) {
+ output_buffers_.push_back(input);
+ received_end_of_stream_ = true;
+ return true;
+ }
+
+ DCHECK(input->timestamp() != kNoTimestamp());
+ DCHECK(input->duration() > base::TimeDelta());
+ DCHECK_GT(input->frame_count(), 0);
+
+ if (output_timestamp_helper_.base_timestamp() == kNoTimestamp())
+ output_timestamp_helper_.SetBaseTimestamp(input->timestamp());
+
+ if (output_timestamp_helper_.base_timestamp() > input->timestamp()) {
+ DVLOG(1) << "Input timestamp is before the base timestamp.";
+ return false;
+ }
+
+ base::TimeDelta timestamp = input->timestamp();
+ base::TimeDelta expected_timestamp = output_timestamp_helper_.GetTimestamp();
+ base::TimeDelta delta = timestamp - expected_timestamp;
+
+ if (std::abs(delta.InMilliseconds()) > kMaxTimeDeltaInMilliseconds) {
+ DVLOG(1) << "Timestamp delta too large: " << delta.InMicroseconds() << "us";
+ return false;
+ }
+
+ int frames_to_fill = 0;
+ if (delta != base::TimeDelta())
+ frames_to_fill = output_timestamp_helper_.GetFramesToTarget(timestamp);
+
+ if (frames_to_fill == 0 || std::abs(frames_to_fill) < min_gap_size_) {
+ AddOutputBuffer(input);
+ return true;
+ }
+
+ if (frames_to_fill > 0) {
+ DVLOG(1) << "Gap detected @ " << expected_timestamp.InMicroseconds()
+ << " us: " << delta.InMicroseconds() << " us";
+
+ // Create a buffer with enough silence samples to fill the gap and
+ // add it to the output buffer.
+ scoped_refptr<AudioBuffer> gap = AudioBuffer::CreateEmptyBuffer(
+ input->channel_count(),
+ frames_to_fill,
+ expected_timestamp,
+ output_timestamp_helper_.GetFrameDuration(frames_to_fill));
+ AddOutputBuffer(gap);
+
+ // Add the input buffer now that the gap has been filled.
+ AddOutputBuffer(input);
+ return true;
+ }
+
+ int frames_to_skip = -frames_to_fill;
+
+ DVLOG(1) << "Overlap detected @ " << expected_timestamp.InMicroseconds()
+ << " us: " << -delta.InMicroseconds() << " us";
+
+ if (input->frame_count() <= frames_to_skip) {
+ DVLOG(1) << "Dropping whole buffer";
+ return true;
+ }
+
+ // Copy the trailing samples that do not overlap samples already output
+ // into a new buffer. Add this new buffer to the output queue.
+ //
+ // TODO(acolwell): Implement a cross-fade here so the transition is less
+ // jarring.
+ input->TrimStart(frames_to_skip);
+ AddOutputBuffer(input);
+ return true;
+}
+
+bool AudioSplicer::HasNextBuffer() const {
+ return !output_buffers_.empty();
+}
+
+scoped_refptr<AudioBuffer> AudioSplicer::GetNextBuffer() {
+ scoped_refptr<AudioBuffer> ret = output_buffers_.front();
+ output_buffers_.pop_front();
+ return ret;
+}
+
+void AudioSplicer::AddOutputBuffer(const scoped_refptr<AudioBuffer>& buffer) {
+ output_timestamp_helper_.AddFrames(buffer->frame_count());
+ output_buffers_.push_back(buffer);
+}
+
+} // namespace media
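[Editor's note: a typical drain pattern for the splicer above, as also exercised by the unit tests that follow — push one decoded buffer, then empty the output queue. AddInput() returning false corresponds to a gap or overlap beyond the 50 ms limit, or a timestamp before the base.]

#include <vector>

#include "base/memory/ref_counted.h"
#include "media/base/audio_buffer.h"
#include "media/base/audio_splicer.h"

namespace {

bool PushAndDrain(media::AudioSplicer* splicer,
                  const scoped_refptr<media::AudioBuffer>& input,
                  std::vector<scoped_refptr<media::AudioBuffer> >* out) {
  if (!splicer->AddInput(input))
    return false;  // Gap/overlap too large, or timestamp went backwards.
  while (splicer->HasNextBuffer())
    out->push_back(splicer->GetNextBuffer());  // May include a silence gap.
  return true;
}

}  // namespace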
diff --git a/chromium/media/base/audio_splicer.h b/chromium/media/base/audio_splicer.h
new file mode 100644
index 00000000000..50445b2d54c
--- /dev/null
+++ b/chromium/media/base/audio_splicer.h
@@ -0,0 +1,60 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_AUDIO_SPLICER_H_
+#define MEDIA_BASE_AUDIO_SPLICER_H_
+
+#include <deque>
+
+#include "base/memory/ref_counted.h"
+#include "media/base/audio_timestamp_helper.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+class AudioBuffer;
+class AudioDecoderConfig;
+
+// Helper class that handles filling gaps and resolving overlaps.
+class MEDIA_EXPORT AudioSplicer {
+ public:
+  explicit AudioSplicer(int samples_per_second);
+ ~AudioSplicer();
+
+ // Resets the splicer state by clearing the output buffers queue,
+ // and resetting the timestamp helper.
+ void Reset();
+
+  // Adds a new buffer of samples, or an end-of-stream buffer, to the
+  // splicer. Returns true if the buffer was accepted; false if an error
+  // occurred.
+ bool AddInput(const scoped_refptr<AudioBuffer>& input);
+
+ // Returns true if the splicer has a buffer to return.
+ bool HasNextBuffer() const;
+
+ // Removes the next buffer from the output buffer queue and returns it.
+ // This should only be called if HasNextBuffer() returns true.
+ scoped_refptr<AudioBuffer> GetNextBuffer();
+
+ private:
+ void AddOutputBuffer(const scoped_refptr<AudioBuffer>& buffer);
+
+ AudioTimestampHelper output_timestamp_helper_;
+
+ // Minimum gap size needed before the splicer will take action to
+ // fill a gap. This avoids periodically inserting and then dropping samples
+ // when the buffer timestamps are slightly off because of timestamp rounding
+ // in the source content. Unit is frames.
+ int min_gap_size_;
+
+ std::deque<scoped_refptr<AudioBuffer> > output_buffers_;
+ bool received_end_of_stream_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(AudioSplicer);
+};
+
+} // namespace media
+
+#endif  // MEDIA_BASE_AUDIO_SPLICER_H_
diff --git a/chromium/media/base/audio_splicer_unittest.cc b/chromium/media/base/audio_splicer_unittest.cc
new file mode 100644
index 00000000000..43902687fae
--- /dev/null
+++ b/chromium/media/base/audio_splicer_unittest.cc
@@ -0,0 +1,374 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/scoped_ptr.h"
+#include "media/base/audio_buffer.h"
+#include "media/base/audio_bus.h"
+#include "media/base/audio_splicer.h"
+#include "media/base/audio_timestamp_helper.h"
+#include "media/base/buffers.h"
+#include "media/base/test_helpers.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+static const SampleFormat kSampleFormat = kSampleFormatF32;
+static const int kChannels = 1;
+static const int kDefaultSampleRate = 44100;
+static const int kDefaultBufferSize = 100;
+
+class AudioSplicerTest : public ::testing::Test {
+ public:
+ AudioSplicerTest()
+ : splicer_(kDefaultSampleRate),
+ input_timestamp_helper_(kDefaultSampleRate) {
+ input_timestamp_helper_.SetBaseTimestamp(base::TimeDelta());
+ }
+
+ scoped_refptr<AudioBuffer> GetNextInputBuffer(float value) {
+ return GetNextInputBuffer(value, kDefaultBufferSize);
+ }
+
+ scoped_refptr<AudioBuffer> GetNextInputBuffer(float value, int frame_size) {
+ scoped_refptr<AudioBuffer> buffer = MakeInterleavedAudioBuffer<float>(
+ kSampleFormat,
+ kChannels,
+ value,
+ 0.0f,
+ frame_size,
+ input_timestamp_helper_.GetTimestamp(),
+ input_timestamp_helper_.GetFrameDuration(frame_size));
+ input_timestamp_helper_.AddFrames(frame_size);
+ return buffer;
+ }
+
+ bool VerifyData(scoped_refptr<AudioBuffer> buffer, float value) {
+ int frames = buffer->frame_count();
+ scoped_ptr<AudioBus> bus = AudioBus::Create(kChannels, frames);
+ buffer->ReadFrames(frames, 0, 0, bus.get());
+ for (int i = 0; i < frames; ++i) {
+ if (bus->channel(0)[i] != value)
+ return false;
+ }
+ return true;
+ }
+
+ protected:
+ AudioSplicer splicer_;
+ AudioTimestampHelper input_timestamp_helper_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioSplicerTest);
+};
+
+TEST_F(AudioSplicerTest, PassThru) {
+ EXPECT_FALSE(splicer_.HasNextBuffer());
+
+ // Test single buffer pass-thru behavior.
+ scoped_refptr<AudioBuffer> input_1 = GetNextInputBuffer(0.1f);
+ EXPECT_TRUE(splicer_.AddInput(input_1));
+ EXPECT_TRUE(splicer_.HasNextBuffer());
+
+ scoped_refptr<AudioBuffer> output_1 = splicer_.GetNextBuffer();
+ EXPECT_FALSE(splicer_.HasNextBuffer());
+ EXPECT_EQ(input_1->timestamp(), output_1->timestamp());
+ EXPECT_EQ(input_1->duration(), output_1->duration());
+ EXPECT_EQ(input_1->frame_count(), output_1->frame_count());
+ EXPECT_TRUE(VerifyData(output_1, 0.1f));
+
+ // Test that multiple buffers can be queued in the splicer.
+ scoped_refptr<AudioBuffer> input_2 = GetNextInputBuffer(0.2f);
+ scoped_refptr<AudioBuffer> input_3 = GetNextInputBuffer(0.3f);
+ EXPECT_TRUE(splicer_.AddInput(input_2));
+ EXPECT_TRUE(splicer_.AddInput(input_3));
+ EXPECT_TRUE(splicer_.HasNextBuffer());
+
+ scoped_refptr<AudioBuffer> output_2 = splicer_.GetNextBuffer();
+ EXPECT_TRUE(splicer_.HasNextBuffer());
+ EXPECT_EQ(input_2->timestamp(), output_2->timestamp());
+ EXPECT_EQ(input_2->duration(), output_2->duration());
+ EXPECT_EQ(input_2->frame_count(), output_2->frame_count());
+
+ scoped_refptr<AudioBuffer> output_3 = splicer_.GetNextBuffer();
+ EXPECT_FALSE(splicer_.HasNextBuffer());
+ EXPECT_EQ(input_3->timestamp(), output_3->timestamp());
+ EXPECT_EQ(input_3->duration(), output_3->duration());
+ EXPECT_EQ(input_3->frame_count(), output_3->frame_count());
+}
+
+TEST_F(AudioSplicerTest, Reset) {
+ scoped_refptr<AudioBuffer> input_1 = GetNextInputBuffer(0.1f);
+ EXPECT_TRUE(splicer_.AddInput(input_1));
+ EXPECT_TRUE(splicer_.HasNextBuffer());
+
+ splicer_.Reset();
+ EXPECT_FALSE(splicer_.HasNextBuffer());
+
+  // Add some frames to the timestamp helper so that the
+  // next buffer starts many frames beyond the end of
+  // |input_1|. This is to make sure that Reset() actually
+  // clears its state and doesn't try to insert a gap.
+ input_timestamp_helper_.AddFrames(100);
+
+ // Verify that a new input buffer passes through as expected.
+ scoped_refptr<AudioBuffer> input_2 = GetNextInputBuffer(0.2f);
+ EXPECT_TRUE(splicer_.AddInput(input_2));
+ EXPECT_TRUE(splicer_.HasNextBuffer());
+
+ scoped_refptr<AudioBuffer> output_2 = splicer_.GetNextBuffer();
+ EXPECT_FALSE(splicer_.HasNextBuffer());
+ EXPECT_EQ(input_2->timestamp(), output_2->timestamp());
+ EXPECT_EQ(input_2->duration(), output_2->duration());
+ EXPECT_EQ(input_2->frame_count(), output_2->frame_count());
+}
+
+TEST_F(AudioSplicerTest, EndOfStream) {
+ scoped_refptr<AudioBuffer> input_1 = GetNextInputBuffer(0.1f);
+ scoped_refptr<AudioBuffer> input_2 = AudioBuffer::CreateEOSBuffer();
+ scoped_refptr<AudioBuffer> input_3 = GetNextInputBuffer(0.2f);
+ EXPECT_TRUE(input_2->end_of_stream());
+
+ EXPECT_TRUE(splicer_.AddInput(input_1));
+ EXPECT_TRUE(splicer_.AddInput(input_2));
+ EXPECT_TRUE(splicer_.HasNextBuffer());
+
+ scoped_refptr<AudioBuffer> output_1 = splicer_.GetNextBuffer();
+ scoped_refptr<AudioBuffer> output_2 = splicer_.GetNextBuffer();
+ EXPECT_FALSE(splicer_.HasNextBuffer());
+ EXPECT_EQ(input_1->timestamp(), output_1->timestamp());
+ EXPECT_EQ(input_1->duration(), output_1->duration());
+ EXPECT_EQ(input_1->frame_count(), output_1->frame_count());
+
+ EXPECT_TRUE(output_2->end_of_stream());
+
+ // Verify that buffers can be added again after Reset().
+ splicer_.Reset();
+ EXPECT_TRUE(splicer_.AddInput(input_3));
+ scoped_refptr<AudioBuffer> output_3 = splicer_.GetNextBuffer();
+ EXPECT_FALSE(splicer_.HasNextBuffer());
+ EXPECT_EQ(input_3->timestamp(), output_3->timestamp());
+ EXPECT_EQ(input_3->duration(), output_3->duration());
+ EXPECT_EQ(input_3->frame_count(), output_3->frame_count());
+}
+
+
+// Test the gap insertion code.
+// +--------------+ +--------------+
+// |11111111111111| |22222222222222|
+// +--------------+ +--------------+
+// Results in:
+// +--------------+----+--------------+
+// |11111111111111|0000|22222222222222|
+// +--------------+----+--------------+
+TEST_F(AudioSplicerTest, GapInsertion) {
+ scoped_refptr<AudioBuffer> input_1 = GetNextInputBuffer(0.1f);
+
+  // Add frames to the timestamp helper so that the next buffer
+  // will have a starting timestamp that indicates a gap is
+  // present.
+ const int kGapSize = 7;
+ input_timestamp_helper_.AddFrames(kGapSize);
+ scoped_refptr<AudioBuffer> input_2 = GetNextInputBuffer(0.2f);
+
+ EXPECT_TRUE(splicer_.AddInput(input_1));
+ EXPECT_TRUE(splicer_.AddInput(input_2));
+
+ // Verify that a gap buffer is generated.
+ EXPECT_TRUE(splicer_.HasNextBuffer());
+ scoped_refptr<AudioBuffer> output_1 = splicer_.GetNextBuffer();
+ scoped_refptr<AudioBuffer> output_2 = splicer_.GetNextBuffer();
+ scoped_refptr<AudioBuffer> output_3 = splicer_.GetNextBuffer();
+ EXPECT_FALSE(splicer_.HasNextBuffer());
+
+ // Verify that the first input buffer passed through unmodified.
+ EXPECT_EQ(input_1->timestamp(), output_1->timestamp());
+ EXPECT_EQ(input_1->duration(), output_1->duration());
+ EXPECT_EQ(input_1->frame_count(), output_1->frame_count());
+ EXPECT_TRUE(VerifyData(output_1, 0.1f));
+
+ // Verify the contents of the gap buffer.
+ base::TimeDelta gap_timestamp =
+ input_1->timestamp() + input_1->duration();
+ base::TimeDelta gap_duration = input_2->timestamp() - gap_timestamp;
+ EXPECT_GT(gap_duration, base::TimeDelta());
+ EXPECT_EQ(gap_timestamp, output_2->timestamp());
+ EXPECT_EQ(gap_duration, output_2->duration());
+ EXPECT_EQ(kGapSize, output_2->frame_count());
+ EXPECT_TRUE(VerifyData(output_2, 0.0f));
+
+ // Verify that the second input buffer passed through unmodified.
+ EXPECT_EQ(input_2->timestamp(), output_3->timestamp());
+ EXPECT_EQ(input_2->duration(), output_3->duration());
+ EXPECT_EQ(input_2->frame_count(), output_3->frame_count());
+ EXPECT_TRUE(VerifyData(output_3, 0.2f));
+}
+
+
+// Test that an error is signalled when the gap between input buffers is
+// too large.
+TEST_F(AudioSplicerTest, GapTooLarge) {
+ scoped_refptr<AudioBuffer> input_1 = GetNextInputBuffer(0.1f);
+
+  // Add a second's worth of frames so that an unacceptably large
+  // gap exists between |input_1| and |input_2|.
+ const int kGapSize = kDefaultSampleRate;
+ input_timestamp_helper_.AddFrames(kGapSize);
+ scoped_refptr<AudioBuffer> input_2 = GetNextInputBuffer(0.2f);
+
+ EXPECT_TRUE(splicer_.AddInput(input_1));
+ EXPECT_FALSE(splicer_.AddInput(input_2));
+
+ EXPECT_TRUE(splicer_.HasNextBuffer());
+ scoped_refptr<AudioBuffer> output_1 = splicer_.GetNextBuffer();
+
+ // Verify that the first input buffer passed through unmodified.
+ EXPECT_EQ(input_1->timestamp(), output_1->timestamp());
+ EXPECT_EQ(input_1->duration(), output_1->duration());
+ EXPECT_EQ(input_1->frame_count(), output_1->frame_count());
+ EXPECT_TRUE(VerifyData(output_1, 0.1f));
+
+ // Verify that the second buffer is not available.
+ EXPECT_FALSE(splicer_.HasNextBuffer());
+
+ // Reset the timestamp helper so it can generate a buffer that is
+ // right after |input_1|.
+ input_timestamp_helper_.SetBaseTimestamp(
+ input_1->timestamp() + input_1->duration());
+
+ // Verify that valid buffers are still accepted.
+ scoped_refptr<AudioBuffer> input_3 = GetNextInputBuffer(0.3f);
+ EXPECT_TRUE(splicer_.AddInput(input_3));
+ EXPECT_TRUE(splicer_.HasNextBuffer());
+ scoped_refptr<AudioBuffer> output_2 = splicer_.GetNextBuffer();
+ EXPECT_FALSE(splicer_.HasNextBuffer());
+ EXPECT_EQ(input_3->timestamp(), output_2->timestamp());
+ EXPECT_EQ(input_3->duration(), output_2->duration());
+ EXPECT_EQ(input_3->frame_count(), output_2->frame_count());
+ EXPECT_TRUE(VerifyData(output_2, 0.3f));
+}
+
+
+// Verifies that an error is signalled if AddInput() is called
+// with a timestamp that is earlier than the first buffer added.
+TEST_F(AudioSplicerTest, BufferAddedBeforeBase) {
+ input_timestamp_helper_.SetBaseTimestamp(
+ base::TimeDelta::FromMicroseconds(10));
+ scoped_refptr<AudioBuffer> input_1 = GetNextInputBuffer(0.1f);
+
+ // Reset the timestamp helper so the next buffer will have a timestamp earlier
+ // than |input_1|.
+ input_timestamp_helper_.SetBaseTimestamp(base::TimeDelta::FromSeconds(0));
+ scoped_refptr<AudioBuffer> input_2 = GetNextInputBuffer(0.1f);
+
+ EXPECT_GT(input_1->timestamp(), input_2->timestamp());
+ EXPECT_TRUE(splicer_.AddInput(input_1));
+ EXPECT_FALSE(splicer_.AddInput(input_2));
+}
+
+
+// Test when one buffer partially overlaps another.
+// +--------------+
+// |11111111111111|
+// +--------------+
+// +--------------+
+// |22222222222222|
+// +--------------+
+// Results in:
+// +--------------+----------+
+// |11111111111111|2222222222|
+// +--------------+----------+
+TEST_F(AudioSplicerTest, PartialOverlap) {
+ scoped_refptr<AudioBuffer> input_1 = GetNextInputBuffer(0.1f);
+
+ // Reset timestamp helper so that the next buffer will have a
+ // timestamp that starts in the middle of |input_1|.
+ const int kOverlapSize = input_1->frame_count() / 4;
+ input_timestamp_helper_.SetBaseTimestamp(input_1->timestamp());
+ input_timestamp_helper_.AddFrames(input_1->frame_count() - kOverlapSize);
+
+ scoped_refptr<AudioBuffer> input_2 = GetNextInputBuffer(0.2f);
+
+ EXPECT_TRUE(splicer_.AddInput(input_1));
+ EXPECT_TRUE(splicer_.AddInput(input_2));
+
+ EXPECT_TRUE(splicer_.HasNextBuffer());
+ scoped_refptr<AudioBuffer> output_1 = splicer_.GetNextBuffer();
+ scoped_refptr<AudioBuffer> output_2 = splicer_.GetNextBuffer();
+ EXPECT_FALSE(splicer_.HasNextBuffer());
+
+ // Verify that the first input buffer passed through unmodified.
+ EXPECT_EQ(input_1->timestamp(), output_1->timestamp());
+ EXPECT_EQ(input_1->duration(), output_1->duration());
+ EXPECT_EQ(input_1->frame_count(), output_1->frame_count());
+ EXPECT_TRUE(VerifyData(output_1, 0.1f));
+
+ // Verify that the second input buffer was truncated to only contain
+ // the samples that are after the end of |input_1|. Note that data is not
+ // copied, so |input_2|'s values are modified.
+ base::TimeDelta expected_timestamp =
+ input_1->timestamp() + input_1->duration();
+ base::TimeDelta expected_duration =
+ (input_2->timestamp() + input_2->duration()) - expected_timestamp;
+ EXPECT_EQ(expected_timestamp, output_2->timestamp());
+ EXPECT_EQ(expected_duration, output_2->duration());
+ EXPECT_TRUE(VerifyData(output_2, 0.2f));
+}
+
+
+// Test that an input buffer that is completely overlapped by a buffer
+// that was already added is dropped.
+// +--------------+
+// |11111111111111|
+// +--------------+
+// +-----+
+// |22222|
+// +-----+
+// +-------------+
+// |3333333333333|
+// +-------------+
+// Results in:
+// +--------------+-------------+
+// |11111111111111|3333333333333|
+// +--------------+-------------+
+TEST_F(AudioSplicerTest, DropBuffer) {
+ scoped_refptr<AudioBuffer> input_1 = GetNextInputBuffer(0.1f);
+
+ // Reset timestamp helper so that the next buffer will have a
+ // timestamp that starts in the middle of |input_1|.
+ const int kOverlapOffset = input_1->frame_count() / 2;
+ const int kOverlapSize = input_1->frame_count() / 4;
+ input_timestamp_helper_.SetBaseTimestamp(input_1->timestamp());
+ input_timestamp_helper_.AddFrames(kOverlapOffset);
+
+ scoped_refptr<AudioBuffer> input_2 = GetNextInputBuffer(0.2f, kOverlapSize);
+
+ // Reset the timestamp helper so the next buffer will be right after
+ // |input_1|.
+ input_timestamp_helper_.SetBaseTimestamp(input_1->timestamp());
+ input_timestamp_helper_.AddFrames(input_1->frame_count());
+ scoped_refptr<AudioBuffer> input_3 = GetNextInputBuffer(0.3f);
+
+ EXPECT_TRUE(splicer_.AddInput(input_1));
+ EXPECT_TRUE(splicer_.AddInput(input_2));
+ EXPECT_TRUE(splicer_.AddInput(input_3));
+
+ EXPECT_TRUE(splicer_.HasNextBuffer());
+ scoped_refptr<AudioBuffer> output_1 = splicer_.GetNextBuffer();
+ scoped_refptr<AudioBuffer> output_2 = splicer_.GetNextBuffer();
+ EXPECT_FALSE(splicer_.HasNextBuffer());
+
+ // Verify that the first input buffer passed through unmodified.
+ EXPECT_EQ(input_1->timestamp(), output_1->timestamp());
+ EXPECT_EQ(input_1->duration(), output_1->duration());
+ EXPECT_EQ(input_1->frame_count(), output_1->frame_count());
+ EXPECT_TRUE(VerifyData(output_1, 0.1f));
+
+ // Verify that the second output buffer only contains
+ // the samples that are in |input_3|.
+ EXPECT_EQ(input_3->timestamp(), output_2->timestamp());
+ EXPECT_EQ(input_3->duration(), output_2->duration());
+ EXPECT_EQ(input_3->frame_count(), output_2->frame_count());
+ EXPECT_TRUE(VerifyData(output_2, 0.3f));
+}
+
+} // namespace media
diff --git a/chromium/media/base/audio_timestamp_helper.cc b/chromium/media/base/audio_timestamp_helper.cc
new file mode 100644
index 00000000000..38fde1f0bd9
--- /dev/null
+++ b/chromium/media/base/audio_timestamp_helper.cc
@@ -0,0 +1,75 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/audio_timestamp_helper.h"
+
+#include "base/logging.h"
+#include "media/base/buffers.h"
+
+namespace media {
+
+AudioTimestampHelper::AudioTimestampHelper(int samples_per_second)
+ : base_timestamp_(kNoTimestamp()),
+ frame_count_(0) {
+ DCHECK_GT(samples_per_second, 0);
+ double fps = samples_per_second;
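+  // For a 44100 Hz stream this works out to ~22.67573 microseconds per frame.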
+ microseconds_per_frame_ = base::Time::kMicrosecondsPerSecond / fps;
+}
+
+void AudioTimestampHelper::SetBaseTimestamp(base::TimeDelta base_timestamp) {
+ base_timestamp_ = base_timestamp;
+ frame_count_ = 0;
+}
+
+base::TimeDelta AudioTimestampHelper::base_timestamp() const {
+ return base_timestamp_;
+}
+
+void AudioTimestampHelper::AddFrames(int frame_count) {
+ DCHECK_GE(frame_count, 0);
+ DCHECK(base_timestamp_ != kNoTimestamp());
+ frame_count_ += frame_count;
+}
+
+base::TimeDelta AudioTimestampHelper::GetTimestamp() const {
+ return ComputeTimestamp(frame_count_);
+}
+
+base::TimeDelta AudioTimestampHelper::GetFrameDuration(int frame_count) const {
+ DCHECK_GE(frame_count, 0);
+ base::TimeDelta end_timestamp = ComputeTimestamp(frame_count_ + frame_count);
+ return end_timestamp - GetTimestamp();
+}
+
+int64 AudioTimestampHelper::GetFramesToTarget(base::TimeDelta target) const {
+ DCHECK(base_timestamp_ != kNoTimestamp());
+ DCHECK(target >= base_timestamp_);
+
+ int64 delta_in_us = (target - GetTimestamp()).InMicroseconds();
+ if (delta_in_us == 0)
+ return 0;
+
+ // Compute a timestamp relative to |base_timestamp_| since timestamps
+ // created from |frame_count_| are computed relative to this base.
+ // This ensures that the time to frame computation here is the proper inverse
+ // of the frame to time computation in ComputeTimestamp().
+ base::TimeDelta delta_from_base = target - base_timestamp_;
+
+ // Compute frame count for the time delta. This computation rounds to
+ // the nearest whole number of frames.
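+  // For example, at 44100 Hz one frame lasts ~22.676 microseconds, so a
+  // target 34 microseconds past the base gives (34 + 11.338) / 22.676,
+  // which truncates to 1 frame.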
+ double threshold = microseconds_per_frame_ / 2;
+ int64 target_frame_count =
+ (delta_from_base.InMicroseconds() + threshold) / microseconds_per_frame_;
+ return target_frame_count - frame_count_;
+}
+
+base::TimeDelta AudioTimestampHelper::ComputeTimestamp(
+ int64 frame_count) const {
+ DCHECK_GE(frame_count, 0);
+ DCHECK(base_timestamp_ != kNoTimestamp());
+ double frames_us = microseconds_per_frame_ * frame_count;
+ return base_timestamp_ + base::TimeDelta::FromMicroseconds(frames_us);
+}
+
+} // namespace media
diff --git a/chromium/media/base/audio_timestamp_helper.h b/chromium/media/base/audio_timestamp_helper.h
new file mode 100644
index 00000000000..8b5d50e66f6
--- /dev/null
+++ b/chromium/media/base/audio_timestamp_helper.h
@@ -0,0 +1,71 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_AUDIO_TIMESTAMP_HELPER_H_
+#define MEDIA_BASE_AUDIO_TIMESTAMP_HELPER_H_
+
+#include "base/time/time.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+// Generates timestamps for a sequence of audio sample frames. This class should
+// be used any place timestamps need to be calculated for a sequence of audio
+// samples. It helps avoid timestamp inaccuracies caused by rounding/truncation
+// in repeated sample-count-to-timestamp conversions.
+//
+// The class is constructed with samples_per_second information so that it can
+// convert audio sample frame counts into timestamps. After the object is
+// constructed, SetBaseTimestamp() must be called to specify the starting
+// timestamp of the audio sequence. As audio samples are received, their frame
+// counts are added using AddFrames(). These frame counts are accumulated by
+// this class so GetTimestamp() can be used to determine the timestamp for the
+// samples that have been added. GetFrameDuration() calculates the proper
+// duration values for samples added to the current timestamp. GetFramesToTarget()
+// determines the number of frames that need to be added/removed from the
+// accumulated frames to reach a target timestamp.
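+//
+// A minimal usage sketch (illustrative values; assumes a 44100 Hz stream):
+//   AudioTimestampHelper helper(44100);
+//   helper.SetBaseTimestamp(base::TimeDelta());
+//   helper.AddFrames(44100);
+//   helper.GetTimestamp();  // --> ~1 second.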
+class MEDIA_EXPORT AudioTimestampHelper {
+ public:
+  explicit AudioTimestampHelper(int samples_per_second);
+
+  // Sets the base timestamp to |base_timestamp| and resets the frame count
+  // to 0.
+ void SetBaseTimestamp(base::TimeDelta base_timestamp);
+
+ base::TimeDelta base_timestamp() const;
+
+ // Adds |frame_count| to the frame counter.
+ // Note: SetBaseTimestamp() must be called with a value other than
+ // kNoTimestamp() before this method can be called.
+ void AddFrames(int frame_count);
+
+  // Gets the current timestamp. This value is computed from the base_timestamp()
+ // and the number of sample frames that have been added so far.
+ base::TimeDelta GetTimestamp() const;
+
+ // Gets the duration if |frame_count| frames were added to the current
+ // timestamp reported by GetTimestamp(). This method ensures that
+ // (GetTimestamp() + GetFrameDuration(n)) will equal the timestamp that
+ // GetTimestamp() will return if AddFrames(n) is called.
+ base::TimeDelta GetFrameDuration(int frame_count) const;
+
+ // Returns the number of frames needed to reach the target timestamp.
+ // Note: |target| must be >= |base_timestamp_|.
+ int64 GetFramesToTarget(base::TimeDelta target) const;
+
+ private:
+ base::TimeDelta ComputeTimestamp(int64 frame_count) const;
+
+ double microseconds_per_frame_;
+
+ base::TimeDelta base_timestamp_;
+
+ // Number of frames accumulated by AddFrames() calls.
+ int64 frame_count_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(AudioTimestampHelper);
+};
+
+} // namespace media
+
+#endif  // MEDIA_BASE_AUDIO_TIMESTAMP_HELPER_H_
diff --git a/chromium/media/base/audio_timestamp_helper_unittest.cc b/chromium/media/base/audio_timestamp_helper_unittest.cc
new file mode 100644
index 00000000000..a0cfa3bbfa5
--- /dev/null
+++ b/chromium/media/base/audio_timestamp_helper_unittest.cc
@@ -0,0 +1,122 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/audio_timestamp_helper.h"
+#include "media/base/buffers.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+static const int kDefaultSampleRate = 44100;
+
+class AudioTimestampHelperTest : public ::testing::Test {
+ public:
+ AudioTimestampHelperTest() : helper_(kDefaultSampleRate) {
+ helper_.SetBaseTimestamp(base::TimeDelta());
+ }
+
+ // Adds frames to the helper and returns the current timestamp in
+ // microseconds.
+ int64 AddFrames(int frames) {
+ helper_.AddFrames(frames);
+ return helper_.GetTimestamp().InMicroseconds();
+ }
+
+ int64 FramesToTarget(int target_in_microseconds) {
+ return helper_.GetFramesToTarget(
+ base::TimeDelta::FromMicroseconds(target_in_microseconds));
+ }
+
+ void TestGetFramesToTargetRange(int frame_count, int start, int end) {
+ for (int i = start; i <= end; ++i) {
+ EXPECT_EQ(frame_count, FramesToTarget(i)) << " Failure for timestamp "
+ << i << " us.";
+ }
+ }
+
+ protected:
+ AudioTimestampHelper helper_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioTimestampHelperTest);
+};
+
+TEST_F(AudioTimestampHelperTest, Basic) {
+ EXPECT_EQ(0, helper_.GetTimestamp().InMicroseconds());
+
+ // Verify that the output timestamp is always rounded down to the
+ // nearest microsecond. 1 frame @ 44100 is ~22.67573 microseconds,
+ // which is why the timestamp sometimes increments by 23 microseconds
+ // and other times it increments by 22 microseconds.
+ EXPECT_EQ(0, AddFrames(0));
+ EXPECT_EQ(22, AddFrames(1));
+ EXPECT_EQ(45, AddFrames(1));
+ EXPECT_EQ(68, AddFrames(1));
+ EXPECT_EQ(90, AddFrames(1));
+ EXPECT_EQ(113, AddFrames(1));
+
+ // Verify that adding frames one frame at a time matches the timestamp
+ // returned if the same number of frames are added all at once.
+ base::TimeDelta timestamp_1 = helper_.GetTimestamp();
+ helper_.SetBaseTimestamp(kNoTimestamp());
+ EXPECT_TRUE(kNoTimestamp() == helper_.base_timestamp());
+ helper_.SetBaseTimestamp(base::TimeDelta());
+ EXPECT_EQ(0, helper_.GetTimestamp().InMicroseconds());
+
+ helper_.AddFrames(5);
+ EXPECT_EQ(113, helper_.GetTimestamp().InMicroseconds());
+ EXPECT_TRUE(timestamp_1 == helper_.GetTimestamp());
+}
+
+
+TEST_F(AudioTimestampHelperTest, GetDuration) {
+ helper_.SetBaseTimestamp(base::TimeDelta::FromMicroseconds(100));
+
+ int frame_count = 5;
+ int64 expected_durations[] = { 113, 113, 114, 113, 113, 114 };
+ for (size_t i = 0; i < arraysize(expected_durations); ++i) {
+ base::TimeDelta duration = helper_.GetFrameDuration(frame_count);
+ EXPECT_EQ(expected_durations[i], duration.InMicroseconds());
+
+ base::TimeDelta timestamp_1 = helper_.GetTimestamp() + duration;
+ helper_.AddFrames(frame_count);
+ base::TimeDelta timestamp_2 = helper_.GetTimestamp();
+ EXPECT_TRUE(timestamp_1 == timestamp_2);
+ }
+}
+
+TEST_F(AudioTimestampHelperTest, GetFramesToTarget) {
+ // Verify GetFramesToTarget() rounding behavior.
+  // 1 frame @ 44100 is ~22.67573 microseconds.
+
+ // Test values less than half of the frame duration.
+ TestGetFramesToTargetRange(0, 0, 11);
+
+ // Test values between half the frame duration & the
+ // full frame duration.
+ TestGetFramesToTargetRange(1, 12, 22);
+
+ // Verify that the same number of frames is returned up
+  // to the next half-frame boundary.
+ TestGetFramesToTargetRange(1, 23, 34);
+
+ // Verify the next 3 ranges.
+ TestGetFramesToTargetRange(2, 35, 56);
+ TestGetFramesToTargetRange(3, 57, 79);
+ TestGetFramesToTargetRange(4, 80, 102);
+ TestGetFramesToTargetRange(5, 103, 124);
+
+ // Add frames to the helper so negative frame counts can be tested.
+ helper_.AddFrames(5);
+
+ // Note: The timestamp ranges must match the positive values
+ // tested above to verify that the code is rounding properly.
+ TestGetFramesToTargetRange(0, 103, 124);
+ TestGetFramesToTargetRange(-1, 80, 102);
+ TestGetFramesToTargetRange(-2, 57, 79);
+ TestGetFramesToTargetRange(-3, 35, 56);
+ TestGetFramesToTargetRange(-4, 12, 34);
+ TestGetFramesToTargetRange(-5, 0, 11);
+}
+
+} // namespace media
diff --git a/chromium/media/base/bind_to_loop.h b/chromium/media/base/bind_to_loop.h
new file mode 100644
index 00000000000..f224adbb266
--- /dev/null
+++ b/chromium/media/base/bind_to_loop.h
@@ -0,0 +1,172 @@
+// This file was GENERATED by command:
+// pump.py bind_to_loop.h.pump
+// DO NOT EDIT BY HAND!!!
+
+
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_BIND_TO_LOOP_H_
+#define MEDIA_BASE_BIND_TO_LOOP_H_
+
+#include "base/bind.h"
+#include "base/location.h"
+#include "base/message_loop/message_loop_proxy.h"
+
+// This is a helper utility for base::Bind()ing callbacks on to particular
+// MessageLoops. A typical use is when |a| (of class |A|) wants to hand a
+// callback such as base::Bind(&A::AMethod, a) to |b|, but needs to ensure that
+// when |b| executes the callback, it does so on a particular MessageLoop.
+//
+// Typical usage: request to be called back on the current thread:
+// other->StartAsyncProcessAndCallMeBack(
+// media::BindToLoop(MessageLoopProxy::current(),
+// base::Bind(&MyClass::MyMethod, this)));
+//
+// Note that like base::Bind(), BindToLoop() can't bind non-constant references,
+// and that *unlike* base::Bind(), BindToLoop() makes copies of its arguments,
+// and thus can't be used with arrays.
+
+namespace media {
+
+// Mimic base::internal::CallbackForward, replacing p.Pass() with
+// base::Passed(&p) to account for the extra layer of indirection.
+namespace internal {
+template <typename T>
+T& TrampolineForward(T& t) { return t; }
+
+template <typename T>
+base::internal::PassedWrapper<scoped_ptr<T> > TrampolineForward(
+ scoped_ptr<T>& p) { return base::Passed(&p); }
+
+template <typename T>
+base::internal::PassedWrapper<scoped_ptr<T[]> > TrampolineForward(
+ scoped_ptr<T[]>& p) { return base::Passed(&p); }
+
+template <typename T, typename R>
+base::internal::PassedWrapper<scoped_ptr_malloc<T, R> > TrampolineForward(
+ scoped_ptr_malloc<T, R>& p) { return base::Passed(&p); }
+
+template <typename T>
+base::internal::PassedWrapper<ScopedVector<T> > TrampolineForward(
+ ScopedVector<T>& p) { return base::Passed(&p); }
+
+template <typename T> struct TrampolineHelper;
+
+template <>
+struct TrampolineHelper<void()> {
+ static void Run(
+ const scoped_refptr<base::MessageLoopProxy>& loop,
+ const base::Callback<void()>& cb) {
+ loop->PostTask(FROM_HERE, base::Bind(cb));
+ }
+};
+
+
+template <typename A1>
+struct TrampolineHelper<void(A1)> {
+ static void Run(
+ const scoped_refptr<base::MessageLoopProxy>& loop,
+ const base::Callback<void(A1)>& cb, A1 a1) {
+ loop->PostTask(FROM_HERE, base::Bind(cb, internal::TrampolineForward(a1)));
+ }
+};
+
+
+template <typename A1, typename A2>
+struct TrampolineHelper<void(A1, A2)> {
+ static void Run(
+ const scoped_refptr<base::MessageLoopProxy>& loop,
+ const base::Callback<void(A1, A2)>& cb, A1 a1, A2 a2) {
+ loop->PostTask(FROM_HERE, base::Bind(cb, internal::TrampolineForward(a1),
+ internal::TrampolineForward(a2)));
+ }
+};
+
+
+template <typename A1, typename A2, typename A3>
+struct TrampolineHelper<void(A1, A2, A3)> {
+ static void Run(
+ const scoped_refptr<base::MessageLoopProxy>& loop,
+ const base::Callback<void(A1, A2, A3)>& cb, A1 a1, A2 a2, A3 a3) {
+ loop->PostTask(FROM_HERE, base::Bind(cb, internal::TrampolineForward(a1),
+ internal::TrampolineForward(a2), internal::TrampolineForward(a3)));
+ }
+};
+
+
+template <typename A1, typename A2, typename A3, typename A4>
+struct TrampolineHelper<void(A1, A2, A3, A4)> {
+ static void Run(
+ const scoped_refptr<base::MessageLoopProxy>& loop,
+ const base::Callback<void(A1, A2, A3, A4)>& cb, A1 a1, A2 a2, A3 a3,
+ A4 a4) {
+ loop->PostTask(FROM_HERE, base::Bind(cb, internal::TrampolineForward(a1),
+ internal::TrampolineForward(a2), internal::TrampolineForward(a3),
+ internal::TrampolineForward(a4)));
+ }
+};
+
+
+template <typename A1, typename A2, typename A3, typename A4, typename A5>
+struct TrampolineHelper<void(A1, A2, A3, A4, A5)> {
+ static void Run(
+ const scoped_refptr<base::MessageLoopProxy>& loop,
+ const base::Callback<void(A1, A2, A3, A4, A5)>& cb, A1 a1, A2 a2, A3 a3,
+ A4 a4, A5 a5) {
+ loop->PostTask(FROM_HERE, base::Bind(cb, internal::TrampolineForward(a1),
+ internal::TrampolineForward(a2), internal::TrampolineForward(a3),
+ internal::TrampolineForward(a4), internal::TrampolineForward(a5)));
+ }
+};
+
+
+template <typename A1, typename A2, typename A3, typename A4, typename A5,
+ typename A6>
+struct TrampolineHelper<void(A1, A2, A3, A4, A5, A6)> {
+ static void Run(
+ const scoped_refptr<base::MessageLoopProxy>& loop,
+ const base::Callback<void(A1, A2, A3, A4, A5, A6)>& cb, A1 a1, A2 a2,
+ A3 a3, A4 a4, A5 a5, A6 a6) {
+ loop->PostTask(FROM_HERE, base::Bind(cb, internal::TrampolineForward(a1),
+ internal::TrampolineForward(a2), internal::TrampolineForward(a3),
+ internal::TrampolineForward(a4), internal::TrampolineForward(a5),
+ internal::TrampolineForward(a6)));
+ }
+};
+
+
+template <typename A1, typename A2, typename A3, typename A4, typename A5,
+ typename A6, typename A7>
+struct TrampolineHelper<void(A1, A2, A3, A4, A5, A6, A7)> {
+ static void Run(
+ const scoped_refptr<base::MessageLoopProxy>& loop,
+ const base::Callback<void(A1, A2, A3, A4, A5, A6, A7)>& cb, A1 a1, A2 a2,
+ A3 a3, A4 a4, A5 a5, A6 a6, A7 a7) {
+ loop->PostTask(FROM_HERE, base::Bind(cb, internal::TrampolineForward(a1),
+ internal::TrampolineForward(a2), internal::TrampolineForward(a3),
+ internal::TrampolineForward(a4), internal::TrampolineForward(a5),
+ internal::TrampolineForward(a6), internal::TrampolineForward(a7)));
+ }
+};
+
+
+} // namespace internal
+
+template<typename T>
+static base::Callback<T> BindToLoop(
+ const scoped_refptr<base::MessageLoopProxy>& loop,
+ const base::Callback<T>& cb) {
+ return base::Bind(&internal::TrampolineHelper<T>::Run, loop, cb);
+}
+
+template<typename T>
+static base::Callback<T> BindToCurrentLoop(
+ const base::Callback<T>& cb) {
+ return BindToLoop(base::MessageLoopProxy::current(), cb);
+}
+
+} // namespace media
+
+#endif // MEDIA_BASE_BIND_TO_LOOP_H_
diff --git a/chromium/media/base/bind_to_loop.h.pump b/chromium/media/base/bind_to_loop.h.pump
new file mode 100644
index 00000000000..09ae518e2ee
--- /dev/null
+++ b/chromium/media/base/bind_to_loop.h.pump
@@ -0,0 +1,100 @@
+$$ This is a pump file for generating file templates. Pump is a python
+$$ script that is part of the Google Test suite of utilities. Description
+$$ can be found here:
+$$
+$$ http://code.google.com/p/googletest/wiki/PumpManual
+$$
+
+$$ See comment for MAX_ARITY in base/bind.h.pump.
+$var MAX_ARITY = 7
+
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_BIND_TO_LOOP_H_
+#define MEDIA_BASE_BIND_TO_LOOP_H_
+
+#include "base/bind.h"
+#include "base/location.h"
+#include "base/message_loop/message_loop_proxy.h"
+
+// This is a helper utility for base::Bind()ing callbacks on to particular
+// MessageLoops. A typical use is when |a| (of class |A|) wants to hand a
+// callback such as base::Bind(&A::AMethod, a) to |b|, but needs to ensure that
+// when |b| executes the callback, it does so on a particular MessageLoop.
+//
+// Typical usage: request to be called back on the current thread:
+// other->StartAsyncProcessAndCallMeBack(
+// media::BindToLoop(MessageLoopProxy::current(),
+// base::Bind(&MyClass::MyMethod, this)));
+//
+// Note that like base::Bind(), BindToLoop() can't bind non-constant references,
+// and that *unlike* base::Bind(), BindToLoop() makes copies of its arguments,
+// and thus can't be used with arrays.
+
+namespace media {
+
+// Mimic base::internal::CallbackForward, replacing p.Pass() with
+// base::Passed(&p) to account for the extra layer of indirection.
+namespace internal {
+template <typename T>
+T& TrampolineForward(T& t) { return t; }
+
+template <typename T>
+base::internal::PassedWrapper<scoped_ptr<T> > TrampolineForward(
+ scoped_ptr<T>& p) { return base::Passed(&p); }
+
+template <typename T>
+base::internal::PassedWrapper<scoped_array<T> > TrampolineForward(
+ scoped_array<T>& p) { return base::Passed(&p); }
+
+template <typename T, typename R>
+base::internal::PassedWrapper<scoped_ptr_malloc<T, R> > TrampolineForward(
+ scoped_ptr_malloc<T, R>& p) { return base::Passed(&p); }
+
+template <typename T>
+base::internal::PassedWrapper<ScopedVector<T> > TrampolineForward(
+ ScopedVector<T>& p) { return base::Passed(&p); }
+
+template <typename T> struct TrampolineHelper;
+
+$range ARITY 0..MAX_ARITY
+$for ARITY [[
+$range ARG 1..ARITY
+
+template <$for ARG , [[typename A$(ARG)]]>
+struct TrampolineHelper<void($for ARG , [[A$(ARG)]])> {
+ static void Run(
+ const scoped_refptr<base::MessageLoopProxy>& loop,
+ const base::Callback<void($for ARG , [[A$(ARG)]])>& cb
+$if ARITY != 0 [[, ]]
+$for ARG , [[A$(ARG) a$(ARG)]]
+) {
+ loop->PostTask(FROM_HERE, base::Bind(cb
+$if ARITY != 0 [[, ]]
+$for ARG , [[internal::TrampolineForward(a$(ARG))]]));
+ }
+};
+
+
+]] $$ for ARITY
+
+} // namespace internal
+
+template<typename T>
+static base::Callback<T> BindToLoop(
+ const scoped_refptr<base::MessageLoopProxy>& loop,
+ const base::Callback<T>& cb) {
+ return base::Bind(&internal::TrampolineHelper<T>::Run, loop, cb);
+}
+
+template<typename T>
+static base::Callback<T> BindToCurrentLoop(
+ const base::Callback<T>& cb) {
+ return BindToLoop(base::MessageLoopProxy::current(), cb);
+}
+
+} // namespace media
+
+#endif // MEDIA_BASE_BIND_TO_LOOP_H_
diff --git a/chromium/media/base/bind_to_loop_unittest.cc b/chromium/media/base/bind_to_loop_unittest.cc
new file mode 100644
index 00000000000..0c7a3ddd194
--- /dev/null
+++ b/chromium/media/base/bind_to_loop_unittest.cc
@@ -0,0 +1,169 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/bind_to_loop.h"
+
+#include "base/message_loop/message_loop.h"
+#include "base/synchronization/waitable_event.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+void BoundBoolSet(bool* var, bool val) {
+ *var = val;
+}
+
+void BoundBoolSetFromScopedPtr(bool* var, scoped_ptr<bool> val) {
+ *var = *val;
+}
+
+void BoundBoolSetFromScopedPtrMalloc(bool* var, scoped_ptr_malloc<bool> val) {
+  *var = *val;
+}
+
+void BoundBoolSetFromScopedArray(bool* var, scoped_ptr<bool[]> val) {
+ *var = val[0];
+}
+
+void BoundBoolSetFromConstRef(bool* var, const bool& val) {
+ *var = val;
+}
+
+void BoundIntegersSet(int* a_var, int* b_var, int a_val, int b_val) {
+ *a_var = a_val;
+ *b_var = b_val;
+}
+
+// Various tests that check that the bound function is only actually executed
+// on the message loop, not during the original Run.
+class BindToLoopTest : public ::testing::Test {
+ public:
+ BindToLoopTest() : proxy_(loop_.message_loop_proxy()) {}
+
+ protected:
+ base::MessageLoop loop_;
+ scoped_refptr<base::MessageLoopProxy> proxy_;
+};
+
+TEST_F(BindToLoopTest, Closure) {
+ // Test the closure is run inside the loop, not outside it.
+ base::WaitableEvent waiter(false, false);
+ base::Closure cb = BindToLoop(proxy_, base::Bind(
+ &base::WaitableEvent::Signal, base::Unretained(&waiter)));
+ cb.Run();
+ EXPECT_FALSE(waiter.IsSignaled());
+ loop_.RunUntilIdle();
+ EXPECT_TRUE(waiter.IsSignaled());
+}
+
+TEST_F(BindToLoopTest, Bool) {
+ bool bool_var = false;
+ base::Callback<void(bool)> cb = BindToLoop(proxy_, base::Bind(
+ &BoundBoolSet, &bool_var));
+ cb.Run(true);
+ EXPECT_FALSE(bool_var);
+ loop_.RunUntilIdle();
+ EXPECT_TRUE(bool_var);
+}
+
+TEST_F(BindToLoopTest, BoundScopedPtrBool) {
+ bool bool_val = false;
+ scoped_ptr<bool> scoped_ptr_bool(new bool(true));
+ base::Closure cb = BindToLoop(proxy_, base::Bind(
+ &BoundBoolSetFromScopedPtr, &bool_val, base::Passed(&scoped_ptr_bool)));
+ cb.Run();
+ EXPECT_FALSE(bool_val);
+ loop_.RunUntilIdle();
+ EXPECT_TRUE(bool_val);
+}
+
+TEST_F(BindToLoopTest, PassedScopedPtrBool) {
+ bool bool_val = false;
+ scoped_ptr<bool> scoped_ptr_bool(new bool(true));
+ base::Callback<void(scoped_ptr<bool>)> cb = BindToLoop(proxy_, base::Bind(
+ &BoundBoolSetFromScopedPtr, &bool_val));
+ cb.Run(scoped_ptr_bool.Pass());
+ EXPECT_FALSE(bool_val);
+ loop_.RunUntilIdle();
+ EXPECT_TRUE(bool_val);
+}
+
+TEST_F(BindToLoopTest, BoundScopedArrayBool) {
+ bool bool_val = false;
+ scoped_ptr<bool[]> scoped_array_bool(new bool[1]);
+ scoped_array_bool[0] = true;
+ base::Closure cb = BindToLoop(proxy_, base::Bind(
+ &BoundBoolSetFromScopedArray, &bool_val,
+ base::Passed(&scoped_array_bool)));
+ cb.Run();
+ EXPECT_FALSE(bool_val);
+ loop_.RunUntilIdle();
+ EXPECT_TRUE(bool_val);
+}
+
+TEST_F(BindToLoopTest, PassedScopedArrayBool) {
+ bool bool_val = false;
+ scoped_ptr<bool[]> scoped_array_bool(new bool[1]);
+ scoped_array_bool[0] = true;
+ base::Callback<void(scoped_ptr<bool[]>)> cb = BindToLoop(proxy_, base::Bind(
+ &BoundBoolSetFromScopedArray, &bool_val));
+ cb.Run(scoped_array_bool.Pass());
+ EXPECT_FALSE(bool_val);
+ loop_.RunUntilIdle();
+ EXPECT_TRUE(bool_val);
+}
+
+TEST_F(BindToLoopTest, BoundScopedPtrMallocBool) {
+ bool bool_val = false;
+ scoped_ptr_malloc<bool> scoped_ptr_malloc_bool(
+ static_cast<bool*>(malloc(sizeof(bool))));
+ *scoped_ptr_malloc_bool = true;
+ base::Closure cb = BindToLoop(proxy_, base::Bind(
+ &BoundBoolSetFromScopedPtrMalloc, &bool_val,
+ base::Passed(&scoped_ptr_malloc_bool)));
+ cb.Run();
+ EXPECT_FALSE(bool_val);
+ loop_.RunUntilIdle();
+ EXPECT_TRUE(bool_val);
+}
+
+TEST_F(BindToLoopTest, PassedScopedPtrMallocBool) {
+ bool bool_val = false;
+ scoped_ptr_malloc<bool> scoped_ptr_malloc_bool(
+ static_cast<bool*>(malloc(sizeof(bool))));
+ *scoped_ptr_malloc_bool = true;
+ base::Callback<void(scoped_ptr_malloc<bool>)> cb = BindToLoop(
+ proxy_, base::Bind(&BoundBoolSetFromScopedPtrMalloc, &bool_val));
+ cb.Run(scoped_ptr_malloc_bool.Pass());
+ EXPECT_FALSE(bool_val);
+ loop_.RunUntilIdle();
+ EXPECT_TRUE(bool_val);
+}
+
+TEST_F(BindToLoopTest, BoolConstRef) {
+ bool bool_var = false;
+ bool true_var = true;
+ const bool& true_ref = true_var;
+ base::Closure cb = BindToLoop(proxy_, base::Bind(
+ &BoundBoolSetFromConstRef, &bool_var, true_ref));
+ cb.Run();
+ EXPECT_FALSE(bool_var);
+ loop_.RunUntilIdle();
+ EXPECT_TRUE(bool_var);
+}
+
+TEST_F(BindToLoopTest, Integers) {
+ int a = 0;
+ int b = 0;
+ base::Callback<void(int, int)> cb = BindToLoop(proxy_, base::Bind(
+ &BoundIntegersSet, &a, &b));
+ cb.Run(1, -1);
+ EXPECT_EQ(a, 0);
+ EXPECT_EQ(b, 0);
+ loop_.RunUntilIdle();
+ EXPECT_EQ(a, 1);
+ EXPECT_EQ(b, -1);
+}
+
+} // namespace media
diff --git a/chromium/media/base/bit_reader.cc b/chromium/media/base/bit_reader.cc
new file mode 100644
index 00000000000..9f6f4098a1f
--- /dev/null
+++ b/chromium/media/base/bit_reader.cc
@@ -0,0 +1,81 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/bit_reader.h"
+
+namespace media {
+
+BitReader::BitReader(const uint8* data, off_t size)
+ : data_(data), bytes_left_(size), num_remaining_bits_in_curr_byte_(0) {
+ DCHECK(data_ != NULL && bytes_left_ > 0);
+
+ UpdateCurrByte();
+}
+
+BitReader::~BitReader() {}
+
+bool BitReader::SkipBits(int num_bits) {
+ DCHECK_GE(num_bits, 0);
+ DLOG_IF(INFO, num_bits > 100)
+ << "BitReader::SkipBits inefficient for large skips";
+
+ // Skip any bits in the current byte waiting to be processed, then
+ // process full bytes until less than 8 bits remaining.
+ while (num_bits > 0 && num_bits > num_remaining_bits_in_curr_byte_) {
+ num_bits -= num_remaining_bits_in_curr_byte_;
+ num_remaining_bits_in_curr_byte_ = 0;
+ UpdateCurrByte();
+
+ // If there is no more data remaining, only return true if we
+ // skipped all that were requested.
+ if (num_remaining_bits_in_curr_byte_ == 0)
+ return (num_bits == 0);
+ }
+
+ // Less than 8 bits remaining to skip. Use ReadBitsInternal to verify
+ // that the remaining bits we need exist, and adjust them as necessary
+ // for subsequent operations.
+ uint64 not_needed;
+ return ReadBitsInternal(num_bits, &not_needed);
+}
+
+int BitReader::bits_available() const {
+ return 8 * bytes_left_ + num_remaining_bits_in_curr_byte_;
+}
+
+bool BitReader::ReadBitsInternal(int num_bits, uint64* out) {
+ DCHECK_LE(num_bits, 64);
+
+ *out = 0;
+
+ while (num_remaining_bits_in_curr_byte_ != 0 && num_bits != 0) {
+ int bits_to_take = std::min(num_remaining_bits_in_curr_byte_, num_bits);
+
+ *out <<= bits_to_take;
+ *out += curr_byte_ >> (num_remaining_bits_in_curr_byte_ - bits_to_take);
+ num_bits -= bits_to_take;
+ num_remaining_bits_in_curr_byte_ -= bits_to_take;
+ curr_byte_ &= (1 << num_remaining_bits_in_curr_byte_) - 1;
+
+ if (num_remaining_bits_in_curr_byte_ == 0)
+ UpdateCurrByte();
+ }
+
+ return num_bits == 0;
+}
+
+void BitReader::UpdateCurrByte() {
+ DCHECK_EQ(num_remaining_bits_in_curr_byte_, 0);
+
+ if (bytes_left_ == 0)
+ return;
+
+ // Load a new byte and advance pointers.
+ curr_byte_ = *data_;
+ ++data_;
+ --bytes_left_;
+ num_remaining_bits_in_curr_byte_ = 8;
+}
+
+} // namespace media
diff --git a/chromium/media/base/bit_reader.h b/chromium/media/base/bit_reader.h
new file mode 100644
index 00000000000..8c15891c915
--- /dev/null
+++ b/chromium/media/base/bit_reader.h
@@ -0,0 +1,77 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_BIT_READER_H_
+#define MEDIA_BASE_BIT_READER_H_
+
+#include <sys/types.h>
+
+#include "base/basictypes.h"
+#include "base/logging.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+// A class to read bit streams.
+class MEDIA_EXPORT BitReader {
+ public:
+ // Initialize the reader to start reading at |data|, |size| being size
+ // of |data| in bytes.
+ BitReader(const uint8* data, off_t size);
+ ~BitReader();
+
+  // Reads the next |num_bits| bits from the stream into |*out|. Bits are read
+  // most-significant-bit first, so the first bit read from the stream lands at
+  // bit position |num_bits| - 1 of |*out|.
+  // |num_bits| cannot be larger than the number of bits the type can hold.
+ // Return false if the given number of bits cannot be read (not enough
+ // bits in the stream), true otherwise. When return false, the stream will
+ // enter a state where further ReadBits/SkipBits operations will always
+ // return false unless |num_bits| is 0. The type |T| has to be a primitive
+ // integer type.
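+  //
+  // For example (values mirrored from the unit test): with stream bytes
+  // 0x55 0x99, ReadBits(1, &v) yields v == 0, and a following ReadBits(8, &v)
+  // yields v == 0xAB (the remaining 7 bits of 0x55 plus the top bit of 0x99).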
+ template<typename T> bool ReadBits(int num_bits, T *out) {
+ DCHECK_LE(num_bits, static_cast<int>(sizeof(T) * 8));
+ uint64 temp;
+ bool ret = ReadBitsInternal(num_bits, &temp);
+ *out = static_cast<T>(temp);
+ return ret;
+ }
+
+ // Skip |num_bits| next bits from stream. Return false if the given number of
+ // bits cannot be skipped (not enough bits in the stream), true otherwise.
+ // When return false, the stream will enter a state where further ReadBits/
+ // SkipBits operations will always return false unless |num_bits| is 0.
+ bool SkipBits(int num_bits);
+
+ // Returns the number of bits available for reading.
+ int bits_available() const;
+
+ private:
+  // Helper function used by ReadBits to avoid inlining the bit reading logic.
+ bool ReadBitsInternal(int num_bits, uint64* out);
+
+ // Advance to the next byte, loading it into curr_byte_.
+  // If num_remaining_bits_in_curr_byte_ is 0 after this function returns,
+ // the stream has reached the end.
+ void UpdateCurrByte();
+
+ // Pointer to the next unread (not in curr_byte_) byte in the stream.
+ const uint8* data_;
+
+ // Bytes left in the stream (without the curr_byte_).
+ off_t bytes_left_;
+
+ // Contents of the current byte; first unread bit starting at position
+ // 8 - num_remaining_bits_in_curr_byte_ from MSB.
+ uint8 curr_byte_;
+
+  // Number of bits remaining in curr_byte_.
+  int num_remaining_bits_in_curr_byte_;
+
+ DISALLOW_COPY_AND_ASSIGN(BitReader);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_BIT_READER_H_
diff --git a/chromium/media/base/bit_reader_unittest.cc b/chromium/media/base/bit_reader_unittest.cc
new file mode 100644
index 00000000000..3dca9c632da
--- /dev/null
+++ b/chromium/media/base/bit_reader_unittest.cc
@@ -0,0 +1,67 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/bit_reader.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+TEST(BitReaderTest, NormalOperationTest) {
+ uint8 value8;
+ uint64 value64;
+ // 0101 0101 1001 1001 repeats 4 times
+ uint8 buffer[] = {0x55, 0x99, 0x55, 0x99, 0x55, 0x99, 0x55, 0x99};
+ BitReader reader1(buffer, 6); // Initialize with 6 bytes only
+
+ EXPECT_TRUE(reader1.ReadBits(1, &value8));
+ EXPECT_EQ(value8, 0);
+ EXPECT_TRUE(reader1.ReadBits(8, &value8));
+ EXPECT_EQ(value8, 0xab); // 1010 1011
+ EXPECT_TRUE(reader1.ReadBits(7, &value64));
+ EXPECT_TRUE(reader1.ReadBits(32, &value64));
+ EXPECT_EQ(value64, 0x55995599u);
+ EXPECT_FALSE(reader1.ReadBits(1, &value8));
+ value8 = 0xff;
+ EXPECT_TRUE(reader1.ReadBits(0, &value8));
+ EXPECT_EQ(value8, 0);
+
+ BitReader reader2(buffer, 8);
+ EXPECT_TRUE(reader2.ReadBits(64, &value64));
+ EXPECT_EQ(value64, 0x5599559955995599ull);
+ EXPECT_FALSE(reader2.ReadBits(1, &value8));
+ EXPECT_TRUE(reader2.ReadBits(0, &value8));
+}
+
+TEST(BitReaderTest, ReadBeyondEndTest) {
+ uint8 value8;
+ uint8 buffer[] = {0x12};
+ BitReader reader1(buffer, sizeof(buffer));
+
+ EXPECT_TRUE(reader1.ReadBits(4, &value8));
+ EXPECT_FALSE(reader1.ReadBits(5, &value8));
+ EXPECT_FALSE(reader1.ReadBits(1, &value8));
+ EXPECT_TRUE(reader1.ReadBits(0, &value8));
+}
+
+TEST(BitReaderTest, SkipBitsTest) {
+ uint8 value8;
+ uint8 buffer[] = { 0x0a, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 };
+ BitReader reader1(buffer, sizeof(buffer));
+
+ EXPECT_TRUE(reader1.SkipBits(2));
+ EXPECT_TRUE(reader1.ReadBits(3, &value8));
+ EXPECT_EQ(value8, 1);
+ EXPECT_TRUE(reader1.SkipBits(11));
+ EXPECT_TRUE(reader1.ReadBits(8, &value8));
+ EXPECT_EQ(value8, 3);
+ EXPECT_TRUE(reader1.SkipBits(76));
+ EXPECT_TRUE(reader1.ReadBits(4, &value8));
+ EXPECT_EQ(value8, 13);
+ EXPECT_FALSE(reader1.SkipBits(100));
+ EXPECT_TRUE(reader1.SkipBits(0));
+ EXPECT_FALSE(reader1.SkipBits(1));
+}
+
+} // namespace media
diff --git a/chromium/media/base/bitstream_buffer.h b/chromium/media/base/bitstream_buffer.h
new file mode 100644
index 00000000000..3a264737f42
--- /dev/null
+++ b/chromium/media/base/bitstream_buffer.h
@@ -0,0 +1,37 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_BITSTREAM_BUFFER_H_
+#define MEDIA_BASE_BITSTREAM_BUFFER_H_
+
+#include "base/basictypes.h"
+#include "base/memory/shared_memory.h"
+
+namespace media {
+
+// Class for passing bitstream buffers around. Does not take ownership of the
+// data. This is the media-namespace equivalent of PP_VideoBitstreamBuffer_Dev.
+class BitstreamBuffer {
+ public:
+ BitstreamBuffer(int32 id, base::SharedMemoryHandle handle, size_t size)
+ : id_(id),
+ handle_(handle),
+ size_(size) {
+ }
+
+ int32 id() const { return id_; }
+ base::SharedMemoryHandle handle() const { return handle_; }
+ size_t size() const { return size_; }
+
+ private:
+ int32 id_;
+ base::SharedMemoryHandle handle_;
+ size_t size_;
+
+ // Allow compiler-generated copy & assign constructors.
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_BITSTREAM_BUFFER_H_
diff --git a/chromium/media/base/buffers.h b/chromium/media/base/buffers.h
new file mode 100644
index 00000000000..6a6c7303d1d
--- /dev/null
+++ b/chromium/media/base/buffers.h
@@ -0,0 +1,45 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Timestamps are derived directly from the encoded media file and are commonly
+// known as the presentation timestamp (PTS). Durations are a best-guess and
+// are usually derived from the sample/frame rate of the media file.
+//
+// Due to encoding and transmission errors, it is not guaranteed that timestamps
+// arrive in a monotonically increasing order nor that the next timestamp will
+// be equal to the previous timestamp plus the duration.
+//
+// In the ideal scenario for a 25fps movie, buffers are timestamped as follows:
+//
+// Buffer0 Buffer1 Buffer2 ... BufferN
+// Timestamp: 0us 40000us 80000us ... (N*40000)us
+// Duration*: 40000us 40000us 40000us ... 40000us
+//
+// *25fps = 0.04s per frame = 40000us per frame
+
+#ifndef MEDIA_BASE_BUFFERS_H_
+#define MEDIA_BASE_BUFFERS_H_
+
+#include "base/basictypes.h"
+#include "base/memory/ref_counted.h"
+#include "base/time/time.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+// TODO(scherkus): Move the contents of this file elsewhere.
+
+// Indicates an invalid or missing timestamp.
+MEDIA_EXPORT extern inline base::TimeDelta kNoTimestamp() {
+ return base::TimeDelta::FromMicroseconds(kint64min);
+}
+
+// Represents an infinite stream duration.
+MEDIA_EXPORT extern inline base::TimeDelta kInfiniteDuration() {
+ return base::TimeDelta::FromMicroseconds(kint64max);
+}
+
+} // namespace media
+
+#endif // MEDIA_BASE_BUFFERS_H_
diff --git a/chromium/media/base/byte_queue.cc b/chromium/media/base/byte_queue.cc
new file mode 100644
index 00000000000..534b55225ec
--- /dev/null
+++ b/chromium/media/base/byte_queue.cc
@@ -0,0 +1,84 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/byte_queue.h"
+
+#include "base/logging.h"
+
+namespace media {
+
+// Default starting size for the queue.
+enum { kDefaultQueueSize = 1024 };
+
+ByteQueue::ByteQueue()
+ : buffer_(new uint8[kDefaultQueueSize]),
+ size_(kDefaultQueueSize),
+ offset_(0),
+ used_(0) {
+}
+
+ByteQueue::~ByteQueue() {}
+
+void ByteQueue::Reset() {
+ offset_ = 0;
+ used_ = 0;
+}
+
+void ByteQueue::Push(const uint8* data, int size) {
+ DCHECK(data);
+ DCHECK_GT(size, 0);
+
+ size_t size_needed = used_ + size;
+
+ // Check to see if we need a bigger buffer.
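+  // The capacity doubles until the data fits; e.g. with size_ == 1024,
+  // used_ == 1000 and a 3000 byte Push(), size_needed == 4000 and new_size
+  // grows 2048 -> 4096 (illustrative numbers).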
+ if (size_needed > size_) {
+ size_t new_size = 2 * size_;
+ while (size_needed > new_size && new_size > size_)
+ new_size *= 2;
+
+ // Sanity check to make sure we didn't overflow.
+ CHECK_GT(new_size, size_);
+
+ scoped_ptr<uint8[]> new_buffer(new uint8[new_size]);
+
+ // Copy the data from the old buffer to the start of the new one.
+ if (used_ > 0)
+ memcpy(new_buffer.get(), front(), used_);
+
+ buffer_.reset(new_buffer.release());
+ size_ = new_size;
+ offset_ = 0;
+ } else if ((offset_ + used_ + size) > size_) {
+ // The buffer is big enough, but we need to move the data in the queue.
+ memmove(buffer_.get(), front(), used_);
+ offset_ = 0;
+ }
+
+ memcpy(front() + used_, data, size);
+ used_ += size;
+}
+
+void ByteQueue::Peek(const uint8** data, int* size) const {
+ DCHECK(data);
+ DCHECK(size);
+ *data = front();
+ *size = used_;
+}
+
+void ByteQueue::Pop(int count) {
+ DCHECK_LE(count, used_);
+
+ offset_ += count;
+ used_ -= count;
+
+ // Move the offset back to 0 if we have reached the end of the buffer.
+ if (offset_ == size_) {
+ DCHECK_EQ(used_, 0);
+ offset_ = 0;
+ }
+}
+
+uint8* ByteQueue::front() const { return buffer_.get() + offset_; }
+
+} // namespace media
diff --git a/chromium/media/base/byte_queue.h b/chromium/media/base/byte_queue.h
new file mode 100644
index 00000000000..f25328d3628
--- /dev/null
+++ b/chromium/media/base/byte_queue.h
@@ -0,0 +1,58 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_BYTE_QUEUE_H_
+#define MEDIA_BASE_BYTE_QUEUE_H_
+
+#include "base/basictypes.h"
+#include "base/memory/scoped_ptr.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+// Represents a queue of bytes.
+// Data is added to the end of the queue via a Push() call and removed via
+// Pop(). The contents of the queue can be observed via the Peek() method.
+// This class manages the underlying storage of the queue and tries to minimize
+// the number of buffer copies when data is appended and removed.
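+//
+// A minimal usage sketch (|data|, |data_size| and |bytes_parsed| are
+// illustrative):
+//   ByteQueue queue;
+//   queue.Push(data, data_size);        // Append bytes.
+//   const uint8* front;
+//   int front_size;
+//   queue.Peek(&front, &front_size);    // Observe without consuming.
+//   queue.Pop(bytes_parsed);            // Consume what was parsed.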
+class MEDIA_EXPORT ByteQueue {
+ public:
+ ByteQueue();
+ ~ByteQueue();
+
+ // Reset the queue to the empty state.
+ void Reset();
+
+ // Appends new bytes onto the end of the queue.
+ void Push(const uint8* data, int size);
+
+ // Get a pointer to the front of the queue and the queue size.
+ // These values are only valid until the next Push() or
+ // Pop() call.
+ void Peek(const uint8** data, int* size) const;
+
+ // Remove |count| bytes from the front of the queue.
+ void Pop(int count);
+
+ private:
+ // Returns a pointer to the front of the queue.
+ uint8* front() const;
+
+ scoped_ptr<uint8[]> buffer_;
+
+ // Size of |buffer_|.
+ size_t size_;
+
+ // Offset from the start of |buffer_| that marks the front of the queue.
+ size_t offset_;
+
+ // Number of bytes stored in the queue.
+ int used_;
+
+ DISALLOW_COPY_AND_ASSIGN(ByteQueue);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_BYTE_QUEUE_H_
diff --git a/chromium/media/base/callback_holder.h b/chromium/media/base/callback_holder.h
new file mode 100644
index 00000000000..2ea5edbeb19
--- /dev/null
+++ b/chromium/media/base/callback_holder.h
@@ -0,0 +1,88 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_CALLBACK_HOLDER_H_
+#define MEDIA_BASE_CALLBACK_HOLDER_H_
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/callback_helpers.h"
+#include "media/base/bind_to_loop.h"
+
+namespace media {
+
+// A helper class that can hold back a callback from being fired.
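+//
+// A minimal usage sketch (|cb| is an illustrative base::Closure):
+//   CallbackHolder<base::Closure> holder;
+//   holder.SetCallback(cb);
+//   holder.HoldCallback();      // Ask for the next RunOrHold() to hold.
+//   holder.RunOrHold();         // |cb| is held rather than run.
+//   holder.RunHeldCallback();   // |cb| finally runs.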
+template <typename CB> class CallbackHolder {
+ public:
+ CallbackHolder() : hold_(false) {}
+
+ ~CallbackHolder() {
+ // Make sure all callbacks are satisfied!
+ DCHECK(!hold_);
+ DCHECK(original_cb_.is_null());
+ DCHECK(held_cb_.is_null());
+ }
+
+ // Sets the callback to be potentially held.
+ void SetCallback(const CB& cb) {
+ DCHECK(original_cb_.is_null());
+ DCHECK(held_cb_.is_null());
+ original_cb_ = cb;
+ }
+
+ bool IsNull() const {
+ return original_cb_.is_null() && held_cb_.is_null();
+ }
+
+ // Holds the callback when Run() is called.
+ void HoldCallback() { hold_ = true; }
+
+ // Runs or holds the callback as specified by |hold_|.
+ // This method has overloaded versions to support different types of CB.
+ void RunOrHold() {
+ DCHECK(held_cb_.is_null());
+ if (hold_)
+ held_cb_ = base::ResetAndReturn(&original_cb_);
+ else
+ base::ResetAndReturn(&original_cb_).Run();
+ }
+
+ template <typename A1> void RunOrHold(A1 a1) {
+ DCHECK(held_cb_.is_null());
+ if (hold_) {
+ held_cb_ = base::Bind(base::ResetAndReturn(&original_cb_),
+ internal::TrampolineForward(a1));
+ } else {
+ base::ResetAndReturn(&original_cb_).Run(a1);
+ }
+ }
+
+ template <typename A1, typename A2> void RunOrHold(A1 a1, A2 a2) {
+ DCHECK(held_cb_.is_null());
+ if (hold_) {
+ held_cb_ = base::Bind(base::ResetAndReturn(&original_cb_),
+ internal::TrampolineForward(a1),
+ internal::TrampolineForward(a2));
+ } else {
+ base::ResetAndReturn(&original_cb_).Run(a1, a2);
+ }
+ }
+
+ // Releases and runs the held callback.
+ void RunHeldCallback() {
+ DCHECK(hold_);
+ DCHECK(!held_cb_.is_null());
+ hold_ = false;
+ base::ResetAndReturn(&held_cb_).Run();
+ }
+
+ private:
+ bool hold_;
+ CB original_cb_;
+ base::Closure held_cb_;
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_CALLBACK_HOLDER_H_
diff --git a/chromium/media/base/callback_holder_unittest.cc b/chromium/media/base/callback_holder_unittest.cc
new file mode 100644
index 00000000000..e06a930ed33
--- /dev/null
+++ b/chromium/media/base/callback_holder_unittest.cc
@@ -0,0 +1,125 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/callback_holder.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+static void SetBool(bool* var) {
+ DCHECK(!*var);
+ *var = true;
+}
+
+static void CopyVar(int var1, int* var2) {
+ DCHECK_NE(var1, *var2);
+ *var2 = var1;
+}
+
+TEST(CallbackHolderTest, SetAfterHold_Closure) {
+ CallbackHolder<base::Closure> cb;
+ EXPECT_TRUE(cb.IsNull());
+
+ cb.HoldCallback();
+
+ bool closure_called = false;
+ cb.SetCallback(base::Bind(&SetBool, &closure_called));
+ EXPECT_FALSE(cb.IsNull());
+
+ cb.RunOrHold();
+ EXPECT_FALSE(closure_called);
+
+ EXPECT_FALSE(cb.IsNull());
+ cb.RunHeldCallback();
+ EXPECT_TRUE(cb.IsNull());
+ EXPECT_TRUE(closure_called);
+}
+
+TEST(CallbackHolderTest, HoldAfterSet_Closure) {
+ CallbackHolder<base::Closure> cb;
+ EXPECT_TRUE(cb.IsNull());
+
+ bool closure_called = false;
+ cb.SetCallback(base::Bind(&SetBool, &closure_called));
+ EXPECT_FALSE(cb.IsNull());
+
+ cb.HoldCallback();
+
+ cb.RunOrHold();
+ EXPECT_FALSE(closure_called);
+ EXPECT_FALSE(cb.IsNull());
+ cb.RunHeldCallback();
+ EXPECT_TRUE(cb.IsNull());
+ EXPECT_TRUE(closure_called);
+}
+
+TEST(CallbackHolderTest, NotHold_Closure) {
+ CallbackHolder<base::Closure> cb;
+ EXPECT_TRUE(cb.IsNull());
+
+ bool closure_called = false;
+ cb.SetCallback(base::Bind(&SetBool, &closure_called));
+ EXPECT_FALSE(cb.IsNull());
+
+ cb.RunOrHold();
+ EXPECT_TRUE(cb.IsNull());
+ EXPECT_TRUE(closure_called);
+}
+
+TEST(CallbackHolderTest, SetAfterHold_Callback) {
+ CallbackHolder<base::Callback<void(int, int*)> > cb;
+ EXPECT_TRUE(cb.IsNull());
+
+ cb.HoldCallback();
+
+ cb.SetCallback(base::Bind(&CopyVar));
+ EXPECT_FALSE(cb.IsNull());
+
+ int var1 = 100;
+ int var2 = 0;
+ cb.RunOrHold(var1, &var2);
+ EXPECT_FALSE(cb.IsNull());
+ EXPECT_NE(var1, var2);
+
+ cb.RunHeldCallback();
+ EXPECT_TRUE(cb.IsNull());
+ EXPECT_EQ(var1, var2);
+}
+
+TEST(CallbackHolderTest, HoldAfterSet_Callback) {
+ CallbackHolder<base::Callback<void(int, int*)> > cb;
+ EXPECT_TRUE(cb.IsNull());
+
+ cb.SetCallback(base::Bind(&CopyVar));
+ EXPECT_FALSE(cb.IsNull());
+
+ cb.HoldCallback();
+
+ int var1 = 100;
+ int var2 = 0;
+ cb.RunOrHold(var1, &var2);
+ EXPECT_FALSE(cb.IsNull());
+ EXPECT_NE(var1, var2);
+
+ cb.RunHeldCallback();
+ EXPECT_TRUE(cb.IsNull());
+ EXPECT_EQ(var1, var2);
+}
+
+TEST(CallbackHolderTest, NotHold_Callback) {
+ CallbackHolder<base::Callback<void(int, int*)> > cb;
+ EXPECT_TRUE(cb.IsNull());
+
+ cb.SetCallback(base::Bind(&CopyVar));
+ EXPECT_FALSE(cb.IsNull());
+
+ int var1 = 100;
+ int var2 = 0;
+ cb.RunOrHold(var1, &var2);
+ EXPECT_TRUE(cb.IsNull());
+ EXPECT_EQ(var1, var2);
+}
+
+} // namespace media
diff --git a/chromium/media/base/channel_layout.cc b/chromium/media/base/channel_layout.cc
new file mode 100644
index 00000000000..e895ddc8541
--- /dev/null
+++ b/chromium/media/base/channel_layout.cc
@@ -0,0 +1,187 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/channel_layout.h"
+
+#include "base/basictypes.h"
+#include "base/logging.h"
+
+namespace media {
+
+static const int kLayoutToChannels[] = {
+ 0, // CHANNEL_LAYOUT_NONE
+ 0, // CHANNEL_LAYOUT_UNSUPPORTED
+ 1, // CHANNEL_LAYOUT_MONO
+ 2, // CHANNEL_LAYOUT_STEREO
+ 3, // CHANNEL_LAYOUT_2_1
+ 3, // CHANNEL_LAYOUT_SURROUND
+ 4, // CHANNEL_LAYOUT_4_0
+ 4, // CHANNEL_LAYOUT_2_2
+ 4, // CHANNEL_LAYOUT_QUAD
+ 5, // CHANNEL_LAYOUT_5_0
+ 6, // CHANNEL_LAYOUT_5_1
+ 5, // CHANNEL_LAYOUT_5_0_BACK
+ 6, // CHANNEL_LAYOUT_5_1_BACK
+ 7, // CHANNEL_LAYOUT_7_0
+ 8, // CHANNEL_LAYOUT_7_1
+ 8, // CHANNEL_LAYOUT_7_1_WIDE
+ 2, // CHANNEL_LAYOUT_STEREO_DOWNMIX
+ 3, // CHANNEL_LAYOUT_2POINT1
+ 4, // CHANNEL_LAYOUT_3_1
+ 5, // CHANNEL_LAYOUT_4_1
+ 6, // CHANNEL_LAYOUT_6_0
+ 6, // CHANNEL_LAYOUT_6_0_FRONT
+ 6, // CHANNEL_LAYOUT_HEXAGONAL
+ 7, // CHANNEL_LAYOUT_6_1
+ 7, // CHANNEL_LAYOUT_6_1_BACK
+ 7, // CHANNEL_LAYOUT_6_1_FRONT
+ 7, // CHANNEL_LAYOUT_7_0_FRONT
+ 8, // CHANNEL_LAYOUT_7_1_WIDE_BACK
+ 8, // CHANNEL_LAYOUT_OCTAGONAL
+ 0, // CHANNEL_LAYOUT_DISCRETE
+};
+
+// The channel orderings for each layout as specified by FFmpeg. Each value
+// represents the index of each channel in each layout. Values of -1 mean the
+// channel at that index is not used for that layout. For example, the left
+// side surround sound channel in FFmpeg's 5.1 layout is in the 5th position
+// (because the order is L, R, C, LFE, LS, RS), so
+// kChannelOrderings[CHANNEL_LAYOUT_5_1][SIDE_LEFT] = 4;
+static const int kChannelOrderings[CHANNEL_LAYOUT_MAX][CHANNELS_MAX] = {
+ // FL | FR | FC | LFE | BL | BR | FLofC | FRofC | BC | SL | SR
+
+ // CHANNEL_LAYOUT_NONE
+ { -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 },
+
+ // CHANNEL_LAYOUT_UNSUPPORTED
+ { -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 },
+
+ // CHANNEL_LAYOUT_MONO
+ { -1 , -1 , 0 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 },
+
+ // CHANNEL_LAYOUT_STEREO
+ { 0 , 1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 },
+
+ // CHANNEL_LAYOUT_2_1
+ { 0 , 1 , -1 , -1 , -1 , -1 , -1 , -1 , 2 , -1 , -1 },
+
+ // CHANNEL_LAYOUT_SURROUND
+ { 0 , 1 , 2 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 },
+
+ // CHANNEL_LAYOUT_4_0
+ { 0 , 1 , 2 , -1 , -1 , -1 , -1 , -1 , 3 , -1 , -1 },
+
+ // CHANNEL_LAYOUT_2_2
+ { 0 , 1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , 2 , 3 },
+
+ // CHANNEL_LAYOUT_QUAD
+ { 0 , 1 , -1 , -1 , 2 , 3 , -1 , -1 , -1 , -1 , -1 },
+
+ // CHANNEL_LAYOUT_5_0
+ { 0 , 1 , 2 , -1 , -1 , -1 , -1 , -1 , -1 , 3 , 4 },
+
+ // CHANNEL_LAYOUT_5_1
+ { 0 , 1 , 2 , 3 , -1 , -1 , -1 , -1 , -1 , 4 , 5 },
+
+ // FL | FR | FC | LFE | BL | BR | FLofC | FRofC | BC | SL | SR
+
+ // CHANNEL_LAYOUT_5_0_BACK
+ { 0 , 1 , 2 , -1 , 3 , 4 , -1 , -1 , -1 , -1 , -1 },
+
+ // CHANNEL_LAYOUT_5_1_BACK
+ { 0 , 1 , 2 , 3 , 4 , 5 , -1 , -1 , -1 , -1 , -1 },
+
+ // CHANNEL_LAYOUT_7_0
+ { 0 , 1 , 2 , -1 , 5 , 6 , -1 , -1 , -1 , 3 , 4 },
+
+ // CHANNEL_LAYOUT_7_1
+ { 0 , 1 , 2 , 3 , 6 , 7 , -1 , -1 , -1 , 4 , 5 },
+
+ // CHANNEL_LAYOUT_7_1_WIDE
+ { 0 , 1 , 2 , 3 , -1 , -1 , 6 , 7 , -1 , 4 , 5 },
+
+ // CHANNEL_LAYOUT_STEREO_DOWNMIX
+ { 0 , 1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 },
+
+ // CHANNEL_LAYOUT_2POINT1
+ { 0 , 1 , -1 , 2 , -1 , -1 , -1 , -1 , -1 , -1 , -1 },
+
+ // CHANNEL_LAYOUT_3_1
+ { 0 , 1 , 2 , 3 , -1 , -1 , -1 , -1 , -1 , -1 , -1 },
+
+ // CHANNEL_LAYOUT_4_1
+ { 0 , 1 , 2 , 4 , -1 , -1 , -1 , -1 , 3 , -1 , -1 },
+
+ // CHANNEL_LAYOUT_6_0
+ { 0 , 1 , 2 , -1 , -1 , -1 , -1 , -1 , 5 , 3 , 4 },
+
+ // CHANNEL_LAYOUT_6_0_FRONT
+ { 0 , 1 , -1 , -1 , -1 , -1 , 4 , 5 , -1 , 2 , 3 },
+
+ // FL | FR | FC | LFE | BL | BR | FLofC | FRofC | BC | SL | SR
+
+ // CHANNEL_LAYOUT_HEXAGONAL
+ { 0 , 1 , 2 , -1 , 3 , 4 , -1 , -1 , 5 , -1 , -1 },
+
+ // CHANNEL_LAYOUT_6_1
+ { 0 , 1 , 2 , 3 , -1 , -1 , -1 , -1 , 6 , 4 , 5 },
+
+ // CHANNEL_LAYOUT_6_1_BACK
+ { 0 , 1 , 2 , 3 , 4 , 5 , -1 , -1 , 6 , -1 , -1 },
+
+ // CHANNEL_LAYOUT_6_1_FRONT
+ { 0 , 1 , -1 , 6 , -1 , -1 , 4 , 5 , -1 , 2 , 3 },
+
+ // CHANNEL_LAYOUT_7_0_FRONT
+ { 0 , 1 , 2 , -1 , -1 , -1 , 5 , 6 , -1 , 3 , 4 },
+
+ // CHANNEL_LAYOUT_7_1_WIDE_BACK
+ { 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , -1 , -1 , -1 },
+
+ // CHANNEL_LAYOUT_OCTAGONAL
+ { 0 , 1 , 2 , -1 , 5 , 6 , -1 , -1 , 7 , 3 , 4 },
+
+ // CHANNEL_LAYOUT_DISCRETE
+ { -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 },
+
+ // FL | FR | FC | LFE | BL | BR | FLofC | FRofC | BC | SL | SR
+};
+
+int ChannelLayoutToChannelCount(ChannelLayout layout) {
+ DCHECK_LT(static_cast<size_t>(layout), arraysize(kLayoutToChannels));
+ return kLayoutToChannels[layout];
+}
+
+// Converts a channel count into a channel layout.
+ChannelLayout GuessChannelLayout(int channels) {
+ switch (channels) {
+ case 1:
+ return CHANNEL_LAYOUT_MONO;
+ case 2:
+ return CHANNEL_LAYOUT_STEREO;
+ case 3:
+ return CHANNEL_LAYOUT_SURROUND;
+ case 4:
+ return CHANNEL_LAYOUT_QUAD;
+ case 5:
+ return CHANNEL_LAYOUT_5_0;
+ case 6:
+ return CHANNEL_LAYOUT_5_1;
+ case 7:
+ return CHANNEL_LAYOUT_6_1;
+ case 8:
+ return CHANNEL_LAYOUT_7_1;
+ default:
+ DVLOG(1) << "Unsupported channel count: " << channels;
+ }
+ return CHANNEL_LAYOUT_UNSUPPORTED;
+}
+
+int ChannelOrder(ChannelLayout layout, Channels channel) {
+ DCHECK_LT(static_cast<size_t>(layout), arraysize(kChannelOrderings));
+ DCHECK_LT(static_cast<size_t>(channel), arraysize(kChannelOrderings[0]));
+ return kChannelOrderings[layout][channel];
+}
+
+} // namespace media
diff --git a/chromium/media/base/channel_layout.h b/chromium/media/base/channel_layout.h
new file mode 100644
index 00000000000..4c96ca517f4
--- /dev/null
+++ b/chromium/media/base/channel_layout.h
@@ -0,0 +1,135 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_CHANNEL_LAYOUT_H_
+#define MEDIA_BASE_CHANNEL_LAYOUT_H_
+
+#include "media/base/media_export.h"
+
+namespace media {
+
+// Enumerates the various representations of the ordering of audio channels.
+// Logged to UMA, so never reuse a value, always add new/greater ones!
+enum ChannelLayout {
+ CHANNEL_LAYOUT_NONE = 0,
+ CHANNEL_LAYOUT_UNSUPPORTED = 1,
+
+ // Front C
+ CHANNEL_LAYOUT_MONO = 2,
+
+ // Front L, Front R
+ CHANNEL_LAYOUT_STEREO = 3,
+
+ // Front L, Front R, Back C
+ CHANNEL_LAYOUT_2_1 = 4,
+
+ // Front L, Front R, Front C
+ CHANNEL_LAYOUT_SURROUND = 5,
+
+ // Front L, Front R, Front C, Back C
+ CHANNEL_LAYOUT_4_0 = 6,
+
+ // Front L, Front R, Side L, Side R
+ CHANNEL_LAYOUT_2_2 = 7,
+
+ // Front L, Front R, Back L, Back R
+ CHANNEL_LAYOUT_QUAD = 8,
+
+ // Front L, Front R, Front C, Side L, Side R
+ CHANNEL_LAYOUT_5_0 = 9,
+
+ // Front L, Front R, Front C, Side L, Side R, LFE
+ CHANNEL_LAYOUT_5_1 = 10,
+
+ // Front L, Front R, Front C, Back L, Back R
+ CHANNEL_LAYOUT_5_0_BACK = 11,
+
+ // Front L, Front R, Front C, Back L, Back R, LFE
+ CHANNEL_LAYOUT_5_1_BACK = 12,
+
+ // Front L, Front R, Front C, Side L, Side R, Back L, Back R
+ CHANNEL_LAYOUT_7_0 = 13,
+
+ // Front L, Front R, Front C, Side L, Side R, LFE, Back L, Back R
+ CHANNEL_LAYOUT_7_1 = 14,
+
+ // Front L, Front R, Front C, Side L, Side R, LFE, Front LofC, Front RofC
+ CHANNEL_LAYOUT_7_1_WIDE = 15,
+
+ // Stereo L, Stereo R
+ CHANNEL_LAYOUT_STEREO_DOWNMIX = 16,
+
+ // Stereo L, Stereo R, LFE
+ CHANNEL_LAYOUT_2POINT1 = 17,
+
+ // Stereo L, Stereo R, Front C, LFE
+ CHANNEL_LAYOUT_3_1 = 18,
+
+ // Stereo L, Stereo R, Front C, Rear C, LFE
+ CHANNEL_LAYOUT_4_1 = 19,
+
+ // Stereo L, Stereo R, Front C, Side L, Side R, Back C
+ CHANNEL_LAYOUT_6_0 = 20,
+
+ // Stereo L, Stereo R, Side L, Side R, Front LofC, Front RofC
+ CHANNEL_LAYOUT_6_0_FRONT = 21,
+
+ // Stereo L, Stereo R, Side L, Side R, Front C, Rear C.
+ CHANNEL_LAYOUT_HEXAGONAL = 22,
+
+ // Stereo L, Stereo R, Side L, Side R, Front C, Rear Center, LFE
+ CHANNEL_LAYOUT_6_1 = 23,
+
+ // Stereo L, Stereo R, Back L, Back R, Front C, Rear Center, LFE
+ CHANNEL_LAYOUT_6_1_BACK = 24,
+
+ // Stereo L, Stereo R, Side L, Side R, Front LofC, Front RofC, LFE
+ CHANNEL_LAYOUT_6_1_FRONT = 25,
+
+ // Front L, Front R, Front C, Side L, Side R, Front LofC, Front RofC
+ CHANNEL_LAYOUT_7_0_FRONT = 26,
+
+ // Front L, Front R, Front C, Back L, Back R, LFE, Front LofC, Front RofC
+ CHANNEL_LAYOUT_7_1_WIDE_BACK = 27,
+
+ // Front L, Front R, Front C, Side L, Side R, Rear C, Back L, Back R.
+ CHANNEL_LAYOUT_OCTAGONAL = 28,
+
+ // Channels are not explicitly mapped to speakers.
+ CHANNEL_LAYOUT_DISCRETE = 29,
+
+ // Total number of layouts.
+ CHANNEL_LAYOUT_MAX // Must always be last!
+};
+
+enum Channels {
+ LEFT = 0,
+ RIGHT,
+ CENTER,
+ LFE,
+ BACK_LEFT,
+ BACK_RIGHT,
+ LEFT_OF_CENTER,
+ RIGHT_OF_CENTER,
+ BACK_CENTER,
+ SIDE_LEFT,
+ SIDE_RIGHT,
+ CHANNELS_MAX
+};
+
+// Returns the expected channel position in an interleaved stream. Values of -1
+// mean the channel at that index is not used for that layout. Values range
+// from 0 to CHANNELS_MAX - 1.
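+// For example, ChannelOrder(CHANNEL_LAYOUT_5_1, SIDE_LEFT) returns 4, since
+// that layout is ordered L, R, C, LFE, SL, SR.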
+MEDIA_EXPORT int ChannelOrder(ChannelLayout layout, Channels channel);
+
+// Returns the number of channels in a given ChannelLayout.
+MEDIA_EXPORT int ChannelLayoutToChannelCount(ChannelLayout layout);
+
+// Given the number of channels, return the best layout,
+// or return CHANNEL_LAYOUT_UNSUPPORTED if there is no good match.
+MEDIA_EXPORT ChannelLayout GuessChannelLayout(int channels);
+
+} // namespace media
+
+#endif // MEDIA_BASE_CHANNEL_LAYOUT_H_
diff --git a/chromium/media/base/channel_mixer.cc b/chromium/media/base/channel_mixer.cc
new file mode 100644
index 00000000000..3de63fe8bf1
--- /dev/null
+++ b/chromium/media/base/channel_mixer.cc
@@ -0,0 +1,406 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// MSVC++ requires this to be set before any other includes to get M_SQRT1_2.
+#define _USE_MATH_DEFINES
+
+#include "media/base/channel_mixer.h"
+
+#include <algorithm>
+#include <cmath>
+
+#include "base/logging.h"
+#include "media/audio/audio_parameters.h"
+#include "media/base/audio_bus.h"
+#include "media/base/vector_math.h"
+
+namespace media {
+
+// Default scale factor for mixing two channels together. We use a different
+// value for stereo -> mono and mono -> stereo mixes.
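+// M_SQRT1_2 is 1/sqrt(2) ~= 0.7071; scaling two equal-power channels by this
+// factor keeps the power of their sum equal to that of a single channel.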
+static const float kEqualPowerScale = static_cast<float>(M_SQRT1_2);
+
+static void ValidateLayout(ChannelLayout layout) {
+ CHECK_NE(layout, CHANNEL_LAYOUT_NONE);
+ CHECK_NE(layout, CHANNEL_LAYOUT_MAX);
+ CHECK_NE(layout, CHANNEL_LAYOUT_UNSUPPORTED);
+ CHECK_NE(layout, CHANNEL_LAYOUT_DISCRETE);
+
+ // Verify there's at least one channel. Should always be true here by virtue
+  // of not being one of the invalid layouts, but let's double-check to be sure.
+ int channel_count = ChannelLayoutToChannelCount(layout);
+ DCHECK_GT(channel_count, 0);
+
+ // If we have more than one channel, verify a symmetric layout for sanity.
+ // The unit test will verify all possible layouts, so this can be a DCHECK.
+ // Symmetry allows simplifying the matrix building code by allowing us to
+ // assume that if one channel of a pair exists, the other will too.
+ if (channel_count > 1) {
+ DCHECK((ChannelOrder(layout, LEFT) >= 0 &&
+ ChannelOrder(layout, RIGHT) >= 0) ||
+ (ChannelOrder(layout, SIDE_LEFT) >= 0 &&
+ ChannelOrder(layout, SIDE_RIGHT) >= 0) ||
+ (ChannelOrder(layout, BACK_LEFT) >= 0 &&
+ ChannelOrder(layout, BACK_RIGHT) >= 0) ||
+ (ChannelOrder(layout, LEFT_OF_CENTER) >= 0 &&
+ ChannelOrder(layout, RIGHT_OF_CENTER) >= 0))
+ << "Non-symmetric channel layout encountered.";
+ } else {
+ DCHECK_EQ(layout, CHANNEL_LAYOUT_MONO);
+ }
+}
+
+class MatrixBuilder {
+ public:
+ MatrixBuilder(ChannelLayout input_layout, int input_channels,
+ ChannelLayout output_layout, int output_channels)
+ : input_layout_(input_layout),
+ input_channels_(input_channels),
+ output_layout_(output_layout),
+ output_channels_(output_channels) {
+ // Special case for 5.0, 5.1 with back channels when upmixed to 7.0, 7.1,
+ // which should map the back LR to side LR.
+ if (input_layout_ == CHANNEL_LAYOUT_5_0_BACK &&
+ output_layout_ == CHANNEL_LAYOUT_7_0) {
+ input_layout_ = CHANNEL_LAYOUT_5_0;
+ } else if (input_layout_ == CHANNEL_LAYOUT_5_1_BACK &&
+ output_layout_ == CHANNEL_LAYOUT_7_1) {
+ input_layout_ = CHANNEL_LAYOUT_5_1;
+ }
+ }
+
+ ~MatrixBuilder() { }
+
+ // Create the transformation matrix of input channels to output channels.
+ // Updates the empty matrix with the transformation, and returns true
+ // if the transformation is just a remapping of channels (no mixing).
+ bool CreateTransformationMatrix(std::vector< std::vector<float> >* matrix);
+
+ private:
+ // Result transformation of input channels to output channels
+ std::vector< std::vector<float> >* matrix_;
+
+ // Input and output channel layout provided during construction.
+ ChannelLayout input_layout_;
+ int input_channels_;
+ ChannelLayout output_layout_;
+ int output_channels_;
+
+  // Helper variable for tracking which inputs are currently unaccounted for;
+  // it should be empty once CreateTransformationMatrix() completes.
+ std::vector<Channels> unaccounted_inputs_;
+
+ // Helper methods for managing unaccounted input channels.
+ void AccountFor(Channels ch);
+ bool IsUnaccounted(Channels ch);
+
+ // Helper methods for checking if |ch| exists in either |input_layout_| or
+ // |output_layout_| respectively.
+ bool HasInputChannel(Channels ch);
+ bool HasOutputChannel(Channels ch);
+
+ // Helper methods for updating |matrix_| with the proper value for
+ // mixing |input_ch| into |output_ch|. MixWithoutAccounting() does not
+ // remove the channel from |unaccounted_inputs_|.
+ void Mix(Channels input_ch, Channels output_ch, float scale);
+ void MixWithoutAccounting(Channels input_ch, Channels output_ch,
+ float scale);
+
+ DISALLOW_COPY_AND_ASSIGN(MatrixBuilder);
+};
+
+ChannelMixer::ChannelMixer(ChannelLayout input_layout,
+ ChannelLayout output_layout) {
+ Initialize(input_layout,
+ ChannelLayoutToChannelCount(input_layout),
+ output_layout,
+ ChannelLayoutToChannelCount(output_layout));
+}
+
+ChannelMixer::ChannelMixer(
+ const AudioParameters& input, const AudioParameters& output) {
+ Initialize(input.channel_layout(),
+ input.channels(),
+ output.channel_layout(),
+ output.channels());
+}
+
+void ChannelMixer::Initialize(
+ ChannelLayout input_layout, int input_channels,
+ ChannelLayout output_layout, int output_channels) {
+ // Stereo down mix should never be the output layout.
+ CHECK_NE(output_layout, CHANNEL_LAYOUT_STEREO_DOWNMIX);
+
+ // Verify that the layouts are supported
+ if (input_layout != CHANNEL_LAYOUT_DISCRETE)
+ ValidateLayout(input_layout);
+ if (output_layout != CHANNEL_LAYOUT_DISCRETE)
+ ValidateLayout(output_layout);
+
+ // Create the transformation matrix
+ MatrixBuilder matrix_builder(input_layout, input_channels,
+ output_layout, output_channels);
+ remapping_ = matrix_builder.CreateTransformationMatrix(&matrix_);
+}
+
+bool MatrixBuilder::CreateTransformationMatrix(
+ std::vector< std::vector<float> >* matrix) {
+ matrix_ = matrix;
+
+ // Size out the initial matrix.
+ matrix_->reserve(output_channels_);
+ for (int output_ch = 0; output_ch < output_channels_; ++output_ch)
+ matrix_->push_back(std::vector<float>(input_channels_, 0));
+
+ // First check for discrete case.
+ if (input_layout_ == CHANNEL_LAYOUT_DISCRETE ||
+ output_layout_ == CHANNEL_LAYOUT_DISCRETE) {
+    // If there are more input channels than output channels, copy as many as
+    // we can and drop the remaining input channels. If there are fewer input
+    // channels than output channels, copy them all and zero out the remaining
+    // output channels.
+ int passthrough_channels = std::min(input_channels_, output_channels_);
+ for (int i = 0; i < passthrough_channels; ++i)
+ (*matrix_)[i][i] = 1;
+
+ return true;
+ }
+
+ // Route matching channels and figure out which ones aren't accounted for.
+ for (Channels ch = LEFT; ch < CHANNELS_MAX;
+ ch = static_cast<Channels>(ch + 1)) {
+ int input_ch_index = ChannelOrder(input_layout_, ch);
+ if (input_ch_index < 0)
+ continue;
+
+ int output_ch_index = ChannelOrder(output_layout_, ch);
+ if (output_ch_index < 0) {
+ unaccounted_inputs_.push_back(ch);
+ continue;
+ }
+
+ DCHECK_LT(static_cast<size_t>(output_ch_index), matrix_->size());
+ DCHECK_LT(static_cast<size_t>(input_ch_index),
+ (*matrix_)[output_ch_index].size());
+ (*matrix_)[output_ch_index][input_ch_index] = 1;
+ }
+
+ // If all input channels are accounted for, there's nothing left to do.
+ if (unaccounted_inputs_.empty()) {
+ // Since all output channels map directly to inputs we can optimize.
+ return true;
+ }
+
+ // Mix front LR into center.
+ if (IsUnaccounted(LEFT)) {
+ // When down mixing to mono from stereo, we need to be careful of full scale
+    // stereo mixes. Scaling by 1 / sqrt(2) here will likely lead to clipping,
+ // so we use 1 / 2 instead.
+ float scale =
+ (output_layout_ == CHANNEL_LAYOUT_MONO && input_channels_ == 2) ?
+ 0.5 : kEqualPowerScale;
+ Mix(LEFT, CENTER, scale);
+ Mix(RIGHT, CENTER, scale);
+ }
+
+ // Mix center into front LR.
+ if (IsUnaccounted(CENTER)) {
+ // When up mixing from mono, just do a copy to front LR.
+ float scale =
+ (input_layout_ == CHANNEL_LAYOUT_MONO) ? 1 : kEqualPowerScale;
+ MixWithoutAccounting(CENTER, LEFT, scale);
+ Mix(CENTER, RIGHT, scale);
+ }
+
+ // Mix back LR into: side LR || back center || front LR || front center.
+ if (IsUnaccounted(BACK_LEFT)) {
+ if (HasOutputChannel(SIDE_LEFT)) {
+      // If the input also has side LR, mix back LR into side LR at equal
+      // power; if the input lacks side LR (but the output has it), copy back
+      // LR to side LR unscaled.
+ float scale = HasInputChannel(SIDE_LEFT) ? kEqualPowerScale : 1;
+ Mix(BACK_LEFT, SIDE_LEFT, scale);
+ Mix(BACK_RIGHT, SIDE_RIGHT, scale);
+ } else if (HasOutputChannel(BACK_CENTER)) {
+ // Mix back LR into back center.
+ Mix(BACK_LEFT, BACK_CENTER, kEqualPowerScale);
+ Mix(BACK_RIGHT, BACK_CENTER, kEqualPowerScale);
+ } else if (output_layout_ > CHANNEL_LAYOUT_MONO) {
+ // Mix back LR into front LR.
+ Mix(BACK_LEFT, LEFT, kEqualPowerScale);
+ Mix(BACK_RIGHT, RIGHT, kEqualPowerScale);
+ } else {
+ // Mix back LR into front center.
+ Mix(BACK_LEFT, CENTER, kEqualPowerScale);
+ Mix(BACK_RIGHT, CENTER, kEqualPowerScale);
+ }
+ }
+
+ // Mix side LR into: back LR || back center || front LR || front center.
+ if (IsUnaccounted(SIDE_LEFT)) {
+ if (HasOutputChannel(BACK_LEFT)) {
+      // If the input also has back LR, mix side LR into back LR at equal
+      // power; if the input lacks back LR (but the output has it), copy side
+      // LR to back LR unscaled.
+ float scale = HasInputChannel(BACK_LEFT) ? kEqualPowerScale : 1;
+ Mix(SIDE_LEFT, BACK_LEFT, scale);
+ Mix(SIDE_RIGHT, BACK_RIGHT, scale);
+ } else if (HasOutputChannel(BACK_CENTER)) {
+ // Mix side LR into back center.
+ Mix(SIDE_LEFT, BACK_CENTER, kEqualPowerScale);
+ Mix(SIDE_RIGHT, BACK_CENTER, kEqualPowerScale);
+ } else if (output_layout_ > CHANNEL_LAYOUT_MONO) {
+ // Mix side LR into front LR.
+ Mix(SIDE_LEFT, LEFT, kEqualPowerScale);
+ Mix(SIDE_RIGHT, RIGHT, kEqualPowerScale);
+ } else {
+ // Mix side LR into front center.
+ Mix(SIDE_LEFT, CENTER, kEqualPowerScale);
+ Mix(SIDE_RIGHT, CENTER, kEqualPowerScale);
+ }
+ }
+
+ // Mix back center into: back LR || side LR || front LR || front center.
+ if (IsUnaccounted(BACK_CENTER)) {
+ if (HasOutputChannel(BACK_LEFT)) {
+ // Mix back center into back LR.
+ MixWithoutAccounting(BACK_CENTER, BACK_LEFT, kEqualPowerScale);
+ Mix(BACK_CENTER, BACK_RIGHT, kEqualPowerScale);
+ } else if (HasOutputChannel(SIDE_LEFT)) {
+ // Mix back center into side LR.
+ MixWithoutAccounting(BACK_CENTER, SIDE_LEFT, kEqualPowerScale);
+ Mix(BACK_CENTER, SIDE_RIGHT, kEqualPowerScale);
+ } else if (output_layout_ > CHANNEL_LAYOUT_MONO) {
+ // Mix back center into front LR.
+ // TODO(dalecurtis): Not sure about these values?
+ MixWithoutAccounting(BACK_CENTER, LEFT, kEqualPowerScale);
+ Mix(BACK_CENTER, RIGHT, kEqualPowerScale);
+ } else {
+ // Mix back center into front center.
+ // TODO(dalecurtis): Not sure about these values?
+ Mix(BACK_CENTER, CENTER, kEqualPowerScale);
+ }
+ }
+
+ // Mix LR of center into: front center || front LR.
+ if (IsUnaccounted(LEFT_OF_CENTER)) {
+ if (HasOutputChannel(LEFT)) {
+ // Mix LR of center into front LR.
+ Mix(LEFT_OF_CENTER, LEFT, kEqualPowerScale);
+ Mix(RIGHT_OF_CENTER, RIGHT, kEqualPowerScale);
+ } else {
+ // Mix LR of center into front center.
+ Mix(LEFT_OF_CENTER, CENTER, kEqualPowerScale);
+ Mix(RIGHT_OF_CENTER, CENTER, kEqualPowerScale);
+ }
+ }
+
+ // Mix LFE into: front LR || front center.
+ if (IsUnaccounted(LFE)) {
+ if (!HasOutputChannel(CENTER)) {
+ // Mix LFE into front LR.
+ MixWithoutAccounting(LFE, LEFT, kEqualPowerScale);
+ Mix(LFE, RIGHT, kEqualPowerScale);
+ } else {
+ // Mix LFE into front center.
+ Mix(LFE, CENTER, kEqualPowerScale);
+ }
+ }
+
+ // All channels should now be accounted for.
+ DCHECK(unaccounted_inputs_.empty());
+
+  // See if the output |matrix_| is simply a remapping matrix. If each output
+  // channel is mapped from a single unscaled input channel we can simply
+  // remap. Doing this programmatically is less fragile than logic checks on
+  // channel mappings.
+ for (int output_ch = 0; output_ch < output_channels_; ++output_ch) {
+ int input_mappings = 0;
+ for (int input_ch = 0; input_ch < input_channels_; ++input_ch) {
+ // We can only remap if each row contains a single scale of 1. I.e., each
+ // output channel is mapped from a single unscaled input channel.
+ if ((*matrix_)[output_ch][input_ch] != 1 || ++input_mappings > 1)
+ return false;
+ }
+ }
+
+ // If we've gotten here, |matrix_| is simply a remapping.
+ return true;
+}
+
+ChannelMixer::~ChannelMixer() {}
+
+void ChannelMixer::Transform(const AudioBus* input, AudioBus* output) {
+ CHECK_EQ(matrix_.size(), static_cast<size_t>(output->channels()));
+ CHECK_EQ(matrix_[0].size(), static_cast<size_t>(input->channels()));
+ CHECK_EQ(input->frames(), output->frames());
+
+ // Zero initialize |output| so we're accumulating from zero.
+ output->Zero();
+
+ // If we're just remapping we can simply copy the correct input to output.
+ if (remapping_) {
+ for (int output_ch = 0; output_ch < output->channels(); ++output_ch) {
+ for (int input_ch = 0; input_ch < input->channels(); ++input_ch) {
+ float scale = matrix_[output_ch][input_ch];
+ if (scale > 0) {
+ DCHECK_EQ(scale, 1.0f);
+ memcpy(output->channel(output_ch), input->channel(input_ch),
+ sizeof(*output->channel(output_ch)) * output->frames());
+ break;
+ }
+ }
+ }
+ return;
+ }
+
+ for (int output_ch = 0; output_ch < output->channels(); ++output_ch) {
+ for (int input_ch = 0; input_ch < input->channels(); ++input_ch) {
+ float scale = matrix_[output_ch][input_ch];
+      // Scale should never be negative. Don't bother scaling by zero.
+ DCHECK_GE(scale, 0);
+ if (scale > 0) {
+ vector_math::FMAC(input->channel(input_ch), scale, output->frames(),
+ output->channel(output_ch));
+ }
+ }
+ }
+}
+
+void MatrixBuilder::AccountFor(Channels ch) {
+ unaccounted_inputs_.erase(std::find(
+ unaccounted_inputs_.begin(), unaccounted_inputs_.end(), ch));
+}
+
+bool MatrixBuilder::IsUnaccounted(Channels ch) {
+ return std::find(unaccounted_inputs_.begin(), unaccounted_inputs_.end(),
+ ch) != unaccounted_inputs_.end();
+}
+
+bool MatrixBuilder::HasInputChannel(Channels ch) {
+ return ChannelOrder(input_layout_, ch) >= 0;
+}
+
+bool MatrixBuilder::HasOutputChannel(Channels ch) {
+ return ChannelOrder(output_layout_, ch) >= 0;
+}
+
+void MatrixBuilder::Mix(Channels input_ch, Channels output_ch, float scale) {
+ MixWithoutAccounting(input_ch, output_ch, scale);
+ AccountFor(input_ch);
+}
+
+void MatrixBuilder::MixWithoutAccounting(Channels input_ch, Channels output_ch,
+ float scale) {
+ int input_ch_index = ChannelOrder(input_layout_, input_ch);
+ int output_ch_index = ChannelOrder(output_layout_, output_ch);
+
+ DCHECK(IsUnaccounted(input_ch));
+ DCHECK_GE(input_ch_index, 0);
+ DCHECK_GE(output_ch_index, 0);
+
+ DCHECK_EQ((*matrix_)[output_ch_index][input_ch_index], 0);
+ (*matrix_)[output_ch_index][input_ch_index] = scale;
+}
+
+} // namespace media
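
A worked example of the construction above: for a stereo-to-mono down mix,
LEFT and RIGHT are unaccounted for in the mono output, so
CreateTransformationMatrix() takes the "Mix front LR into center" branch with
the 0.5 scale (mono output, two input channels), yielding the 1x2 matrix
{ { 0.5, 0.5 } }. Transform() then computes mono = 0.5 * left + 0.5 * right
per frame, and |remapping_| stays false because the row is not a lone
unscaled 1. Conversely, mono-to-stereo copies CENTER into both outputs at
scale 1, yielding { { 1 }, { 1 } }; every row is a single unscaled 1, so
|remapping_| is true and Transform() reduces to two memcpy() calls.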
diff --git a/chromium/media/base/channel_mixer.h b/chromium/media/base/channel_mixer.h
new file mode 100644
index 00000000000..ea3cbf81ba5
--- /dev/null
+++ b/chromium/media/base/channel_mixer.h
@@ -0,0 +1,50 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_CHANNEL_MIXER_H_
+#define MEDIA_BASE_CHANNEL_MIXER_H_
+
+#include <vector>
+
+#include "base/basictypes.h"
+#include "media/base/channel_layout.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+class AudioBus;
+class AudioParameters;
+
+// ChannelMixer is for converting audio between channel layouts. The conversion
+// matrix is built upon construction and used during each Transform() call. The
+// algorithm works by generating a conversion matrix mapping each output channel
+// to a list of input channels. The transform renders all of the output channels,
+// with each output channel rendered according to a weighted sum of the relevant
+// input channels as defined in the matrix.
+class MEDIA_EXPORT ChannelMixer {
+ public:
+ ChannelMixer(ChannelLayout input_layout, ChannelLayout output_layout);
+ ChannelMixer(const AudioParameters& input, const AudioParameters& output);
+ ~ChannelMixer();
+
+ // Transforms all channels from |input| into |output| channels.
+ void Transform(const AudioBus* input, AudioBus* output);
+
+ private:
+ void Initialize(ChannelLayout input_layout, int input_channels,
+ ChannelLayout output_layout, int output_channels);
+
+ // 2D matrix of output channels to input channels.
+ std::vector< std::vector<float> > matrix_;
+
+ // Optimization case for when we can simply remap the input channels to output
+ // channels and don't need to do a multiply-accumulate loop over |matrix_|.
+ bool remapping_;
+
+ DISALLOW_COPY_AND_ASSIGN(ChannelMixer);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_CHANNEL_MIXER_H_
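
A minimal usage sketch of the class above (layouts and frame count are
illustrative; AudioBus::Create() is used the same way by the unit test that
follows):

    media::ChannelMixer mixer(media::CHANNEL_LAYOUT_5_1,
                              media::CHANNEL_LAYOUT_STEREO);
    scoped_ptr<media::AudioBus> input = media::AudioBus::Create(
        media::ChannelLayoutToChannelCount(media::CHANNEL_LAYOUT_5_1), 512);
    scoped_ptr<media::AudioBus> output = media::AudioBus::Create(
        media::ChannelLayoutToChannelCount(media::CHANNEL_LAYOUT_STEREO), 512);
    // ... fill |input| with planar float samples ...
    mixer.Transform(input.get(), output.get());  // Frame counts must match.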
diff --git a/chromium/media/base/channel_mixer_unittest.cc b/chromium/media/base/channel_mixer_unittest.cc
new file mode 100644
index 00000000000..eddbc1b90ba
--- /dev/null
+++ b/chromium/media/base/channel_mixer_unittest.cc
@@ -0,0 +1,180 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// MSVC++ requires this to be set before any other includes to get M_SQRT1_2.
+#define _USE_MATH_DEFINES
+
+#include <cmath>
+
+#include "base/strings/stringprintf.h"
+#include "media/audio/audio_parameters.h"
+#include "media/base/audio_bus.h"
+#include "media/base/channel_mixer.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+// Number of frames to test with.
+enum { kFrames = 16 };
+
+// Test all possible layout conversions can be constructed and mixed.
+TEST(ChannelMixerTest, ConstructAllPossibleLayouts) {
+ for (ChannelLayout input_layout = CHANNEL_LAYOUT_MONO;
+ input_layout < CHANNEL_LAYOUT_MAX;
+ input_layout = static_cast<ChannelLayout>(input_layout + 1)) {
+ for (ChannelLayout output_layout = CHANNEL_LAYOUT_MONO;
+ output_layout < CHANNEL_LAYOUT_STEREO_DOWNMIX;
+ output_layout = static_cast<ChannelLayout>(output_layout + 1)) {
+ // DISCRETE can't be tested here based on the current approach.
+ if (input_layout == CHANNEL_LAYOUT_DISCRETE ||
+ output_layout == CHANNEL_LAYOUT_DISCRETE)
+ continue;
+
+ SCOPED_TRACE(base::StringPrintf(
+ "Input Layout: %d, Output Layout: %d", input_layout, output_layout));
+ ChannelMixer mixer(input_layout, output_layout);
+ scoped_ptr<AudioBus> input_bus = AudioBus::Create(
+ ChannelLayoutToChannelCount(input_layout), kFrames);
+ scoped_ptr<AudioBus> output_bus = AudioBus::Create(
+ ChannelLayoutToChannelCount(output_layout), kFrames);
+ for (int ch = 0; ch < input_bus->channels(); ++ch)
+ std::fill(input_bus->channel(ch), input_bus->channel(ch) + kFrames, 1);
+
+ mixer.Transform(input_bus.get(), output_bus.get());
+ }
+ }
+}
+
+struct ChannelMixerTestData {
+ ChannelMixerTestData(ChannelLayout input_layout, ChannelLayout output_layout,
+ float* channel_values, int num_channel_values,
+ float scale)
+ : input_layout(input_layout),
+ output_layout(output_layout),
+ channel_values(channel_values),
+ num_channel_values(num_channel_values),
+ scale(scale) {
+ input_channels = ChannelLayoutToChannelCount(input_layout);
+ output_channels = ChannelLayoutToChannelCount(output_layout);
+ }
+
+ ChannelMixerTestData(ChannelLayout input_layout, int input_channels,
+ ChannelLayout output_layout, int output_channels,
+ float* channel_values, int num_channel_values)
+ : input_layout(input_layout),
+ input_channels(input_channels),
+ output_layout(output_layout),
+ output_channels(output_channels),
+ channel_values(channel_values),
+ num_channel_values(num_channel_values),
+ scale(1.0f) {
+ }
+
+ std::string DebugString() const {
+ return base::StringPrintf(
+ "Input Layout: %d, Output Layout %d, Scale: %f", input_layout,
+ output_layout, scale);
+ }
+
+ ChannelLayout input_layout;
+ int input_channels;
+ ChannelLayout output_layout;
+ int output_channels;
+ float* channel_values;
+ int num_channel_values;
+ float scale;
+};
+
+std::ostream& operator<<(std::ostream& os, const ChannelMixerTestData& data) {
+ return os << data.DebugString();
+}
+
+class ChannelMixerTest : public testing::TestWithParam<ChannelMixerTestData> {};
+
+// Verify channels are mixed and scaled correctly. The test only works if all
+// output channels have the same value.
+TEST_P(ChannelMixerTest, Mixing) {
+ ChannelLayout input_layout = GetParam().input_layout;
+ int input_channels = GetParam().input_channels;
+ scoped_ptr<AudioBus> input_bus = AudioBus::Create(input_channels, kFrames);
+ AudioParameters input_audio(AudioParameters::AUDIO_PCM_LINEAR,
+ input_layout,
+ AudioParameters::kAudioCDSampleRate, 16,
+ kFrames);
+ if (input_layout == CHANNEL_LAYOUT_DISCRETE)
+ input_audio.SetDiscreteChannels(input_channels);
+
+ ChannelLayout output_layout = GetParam().output_layout;
+ int output_channels = GetParam().output_channels;
+ scoped_ptr<AudioBus> output_bus = AudioBus::Create(output_channels, kFrames);
+ AudioParameters output_audio(AudioParameters::AUDIO_PCM_LINEAR,
+ output_layout,
+ AudioParameters::kAudioCDSampleRate, 16,
+ kFrames);
+ if (output_layout == CHANNEL_LAYOUT_DISCRETE)
+ output_audio.SetDiscreteChannels(output_channels);
+
+ const float* channel_values = GetParam().channel_values;
+ ASSERT_EQ(input_bus->channels(), GetParam().num_channel_values);
+
+ float expected_value = 0;
+ float scale = GetParam().scale;
+ for (int ch = 0; ch < input_bus->channels(); ++ch) {
+ std::fill(input_bus->channel(ch), input_bus->channel(ch) + kFrames,
+ channel_values[ch]);
+ expected_value += channel_values[ch] * scale;
+ }
+
+ ChannelMixer mixer(input_audio, output_audio);
+ mixer.Transform(input_bus.get(), output_bus.get());
+
+  // Validate the output channels.
+ if (input_layout != CHANNEL_LAYOUT_DISCRETE) {
+ for (int ch = 0; ch < output_bus->channels(); ++ch) {
+ for (int frame = 0; frame < output_bus->frames(); ++frame) {
+ ASSERT_FLOAT_EQ(output_bus->channel(ch)[frame], expected_value);
+ }
+ }
+ } else {
+    // Validate discrete mixing: if there is a matching input channel, the
+    // output channel should carry that channel's value; if there is no
+    // matching input channel, the output channel should be 0.
+ for (int ch = 0; ch < output_bus->channels(); ++ch) {
+ expected_value = (ch < input_channels) ? channel_values[ch] : 0;
+ for (int frame = 0; frame < output_bus->frames(); ++frame) {
+ ASSERT_FLOAT_EQ(output_bus->channel(ch)[frame], expected_value);
+ }
+ }
+ }
+}
+
+static float kStereoToMonoValues[] = { 0.5f, 0.75f };
+static float kMonoToStereoValues[] = { 0.5f };
+// Zero the center channel since it will be mixed at scale 1 vs M_SQRT1_2.
+static float kFiveOneToMonoValues[] = { 0.1f, 0.2f, 0.0f, 0.4f, 0.5f, 0.6f };
+static float kFiveDiscreteValues[] = { 0.1f, 0.2f, 0.3f, 0.4f, 0.5f };
+
+// Run through basic sanity tests for some common conversions.
+INSTANTIATE_TEST_CASE_P(ChannelMixerTest, ChannelMixerTest, testing::Values(
+ ChannelMixerTestData(CHANNEL_LAYOUT_STEREO, CHANNEL_LAYOUT_MONO,
+ kStereoToMonoValues, arraysize(kStereoToMonoValues),
+ 0.5f),
+ ChannelMixerTestData(CHANNEL_LAYOUT_MONO, CHANNEL_LAYOUT_STEREO,
+ kMonoToStereoValues, arraysize(kMonoToStereoValues),
+ 1.0f),
+ ChannelMixerTestData(CHANNEL_LAYOUT_5_1, CHANNEL_LAYOUT_MONO,
+ kFiveOneToMonoValues, arraysize(kFiveOneToMonoValues),
+ static_cast<float>(M_SQRT1_2)),
+ ChannelMixerTestData(CHANNEL_LAYOUT_DISCRETE, 2,
+ CHANNEL_LAYOUT_DISCRETE, 2,
+ kStereoToMonoValues, arraysize(kStereoToMonoValues)),
+ ChannelMixerTestData(CHANNEL_LAYOUT_DISCRETE, 2,
+ CHANNEL_LAYOUT_DISCRETE, 5,
+ kStereoToMonoValues, arraysize(kStereoToMonoValues)),
+ ChannelMixerTestData(CHANNEL_LAYOUT_DISCRETE, 5,
+ CHANNEL_LAYOUT_DISCRETE, 2,
+ kFiveDiscreteValues, arraysize(kFiveDiscreteValues))
+));
+
+} // namespace media
diff --git a/chromium/media/base/clock.cc b/chromium/media/base/clock.cc
new file mode 100644
index 00000000000..ea954834e94
--- /dev/null
+++ b/chromium/media/base/clock.cc
@@ -0,0 +1,140 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/clock.h"
+
+#include <algorithm>
+
+#include "base/logging.h"
+#include "base/time/tick_clock.h"
+#include "media/base/buffers.h"
+
+namespace media {
+
+Clock::Clock(base::TickClock* clock) : clock_(clock) {
+ DCHECK(clock_);
+ Reset();
+}
+
+Clock::~Clock() {}
+
+bool Clock::IsPlaying() const {
+ return playing_;
+}
+
+base::TimeDelta Clock::Play() {
+ DCHECK(!playing_);
+ UpdateReferencePoints();
+ playing_ = true;
+ return media_time_;
+}
+
+base::TimeDelta Clock::Pause() {
+ DCHECK(playing_);
+ UpdateReferencePoints();
+ playing_ = false;
+ return media_time_;
+}
+
+void Clock::SetPlaybackRate(float playback_rate) {
+ UpdateReferencePoints();
+ playback_rate_ = playback_rate;
+}
+
+void Clock::SetTime(base::TimeDelta current_time, base::TimeDelta max_time) {
+ DCHECK(current_time <= max_time);
+ DCHECK(current_time != kNoTimestamp());
+
+ UpdateReferencePoints(current_time);
+ max_time_ = ClampToValidTimeRange(max_time);
+ underflow_ = false;
+}
+
+base::TimeDelta Clock::Elapsed() {
+ if (duration_ == kNoTimestamp())
+ return base::TimeDelta();
+
+ // The clock is not advancing, so return the last recorded time.
+ if (!playing_ || underflow_)
+ return media_time_;
+
+ base::TimeDelta elapsed = EstimatedElapsedTime();
+ if (max_time_ != kNoTimestamp() && elapsed > max_time_) {
+ UpdateReferencePoints(max_time_);
+ underflow_ = true;
+ elapsed = max_time_;
+ }
+
+ return elapsed;
+}
+
+void Clock::SetMaxTime(base::TimeDelta max_time) {
+ DCHECK(max_time != kNoTimestamp());
+
+ UpdateReferencePoints();
+ max_time_ = ClampToValidTimeRange(max_time);
+
+ underflow_ = media_time_ > max_time_;
+ if (underflow_)
+ media_time_ = max_time_;
+}
+
+void Clock::SetDuration(base::TimeDelta duration) {
+ DCHECK(duration > base::TimeDelta());
+ duration_ = duration;
+
+ media_time_ = ClampToValidTimeRange(media_time_);
+ if (max_time_ != kNoTimestamp())
+ max_time_ = ClampToValidTimeRange(max_time_);
+}
+
+base::TimeDelta Clock::ElapsedViaProvidedTime(
+ const base::TimeTicks& time) const {
+ // TODO(scherkus): floating point badness scaling time by playback rate.
+ int64 now_us = (time - reference_).InMicroseconds();
+ now_us = static_cast<int64>(now_us * playback_rate_);
+ return media_time_ + base::TimeDelta::FromMicroseconds(now_us);
+}
+
+base::TimeDelta Clock::ClampToValidTimeRange(base::TimeDelta time) const {
+ if (duration_ == kNoTimestamp())
+ return base::TimeDelta();
+ return std::max(std::min(time, duration_), base::TimeDelta());
+}
+
+void Clock::EndOfStream() {
+ Pause();
+ SetTime(Duration(), Duration());
+}
+
+base::TimeDelta Clock::Duration() const {
+ if (duration_ == kNoTimestamp())
+ return base::TimeDelta();
+ return duration_;
+}
+
+void Clock::UpdateReferencePoints() {
+ UpdateReferencePoints(Elapsed());
+}
+
+void Clock::UpdateReferencePoints(base::TimeDelta current_time) {
+ media_time_ = ClampToValidTimeRange(current_time);
+ reference_ = clock_->NowTicks();
+}
+
+base::TimeDelta Clock::EstimatedElapsedTime() {
+ return ClampToValidTimeRange(ElapsedViaProvidedTime(clock_->NowTicks()));
+}
+
+void Clock::Reset() {
+ playing_ = false;
+ playback_rate_ = 1.0f;
+ max_time_ = kNoTimestamp();
+ duration_ = kNoTimestamp();
+ media_time_ = base::TimeDelta();
+ reference_ = base::TimeTicks();
+ underflow_ = false;
+}
+
+} // namespace media
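
The interpolation in ElapsedViaProvidedTime() is linear: elapsed media time is
media_time_ + (now - reference_) * playback_rate_, clamped to [0, duration_].
A minimal sketch, assuming the test tick clock used by the unit test below:

    base::SimpleTestTickClock tick_clock;
    media::Clock clock(&tick_clock);
    clock.SetDuration(base::TimeDelta::FromSeconds(120));
    clock.SetTime(base::TimeDelta::FromSeconds(10), clock.Duration());
    clock.SetPlaybackRate(2.0f);
    clock.Play();
    tick_clock.Advance(base::TimeDelta::FromSeconds(3));
    // clock.Elapsed() is now 10 + 3 * 2.0 = 16 seconds.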
diff --git a/chromium/media/base/clock.h b/chromium/media/base/clock.h
new file mode 100644
index 00000000000..01449be6250
--- /dev/null
+++ b/chromium/media/base/clock.h
@@ -0,0 +1,131 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_CLOCK_H_
+#define MEDIA_BASE_CLOCK_H_
+
+#include "base/basictypes.h"
+#include "base/time/time.h"
+#include "media/base/media_export.h"
+
+namespace base {
+class TickClock;
+} // namespace base
+
+namespace media {
+
+// A clock represents a single source of time to allow audio and video streams
+// to synchronize with each other. Clock essentially tracks the media time with
+// respect to some other source of time, whether that is the monotonic
+// system clock or updates via SetTime(). Clock uses linear interpolation to
+// calculate the current media time since the last time SetTime() was called.
+//
+// Clocks start off paused with a playback rate of 1.0f and a media time of 0.
+//
+// Clock is not thread-safe and must be externally locked.
+//
+// TODO(scherkus): Clock will some day be responsible for executing callbacks
+// given a media time. This will be used primarily by video renderers. For now
+// we'll keep using a poll-and-sleep solution.
+//
+// TODO(miu): Rename media::Clock to avoid confusion (and tripping up the media
+// PRESUBMIT script on future changes).
+class MEDIA_EXPORT Clock {
+ public:
+ explicit Clock(base::TickClock* clock);
+ ~Clock();
+
+ // Returns true if the clock is running.
+ bool IsPlaying() const;
+
+ // Starts the clock and returns the current media time, which will increase
+ // with respect to the current playback rate.
+ base::TimeDelta Play();
+
+ // Stops the clock and returns the current media time, which will remain
+ // constant until Play() is called.
+ base::TimeDelta Pause();
+
+ // Sets a new playback rate. The rate at which the media time will increase
+ // will now change.
+ void SetPlaybackRate(float playback_rate);
+
+  // Forcefully sets the media time to |current_time|. The second parameter,
+  // |max_time|, is the furthest the clock may progress after a call to Play();
+  // it is often the time of the end of the last frame buffered and decoded.
+ //
+ // These values are clamped to the duration of the video, which is initially
+ // set to 0 (before SetDuration() is called).
+ void SetTime(base::TimeDelta current_time, base::TimeDelta max_time);
+
+ // Sets the |max_time| to be returned by a call to Elapsed().
+ void SetMaxTime(base::TimeDelta max_time);
+
+ // Returns the current elapsed media time. Returns 0 if SetDuration() has
+ // never been called.
+ base::TimeDelta Elapsed();
+
+ // Sets the duration of the video. Clock expects the duration will be set
+ // exactly once.
+ void SetDuration(base::TimeDelta duration);
+
+ // Resets clock to an uninitialized state.
+ void Reset();
+
+ // Notifies the clock that the end of stream has been reached. The clock state
+ // is updated accordingly.
+ void EndOfStream();
+
+ // Returns the duration of the clock, or 0 if not set.
+ base::TimeDelta Duration() const;
+
+ private:
+ // Updates the reference points based on the current calculated time.
+ void UpdateReferencePoints();
+
+ // Updates the reference points based on the given |current_time|.
+ void UpdateReferencePoints(base::TimeDelta current_time);
+
+ // Returns the time elapsed based on the current reference points, ignoring
+ // the |max_time_| cap.
+ base::TimeDelta EstimatedElapsedTime();
+
+ // Translates |time| into the current media time, based on the perspective of
+ // the monotonically-increasing system clock.
+ base::TimeDelta ElapsedViaProvidedTime(const base::TimeTicks& time) const;
+
+ base::TimeDelta ClampToValidTimeRange(base::TimeDelta time) const;
+
+ base::TickClock* const clock_;
+
+ // Whether the clock is running.
+ bool playing_;
+
+ // Whether the clock is stalled because it has reached the |max_time_|
+ // allowed.
+ bool underflow_;
+
+ // The monotonic system clock time when this Clock last started playing or had
+ // its time set via SetTime().
+ base::TimeTicks reference_;
+
+ // Current accumulated amount of media time. The remaining portion must be
+ // calculated by comparing the system time to the reference time.
+ base::TimeDelta media_time_;
+
+ // Current playback rate.
+ float playback_rate_;
+
+ // The maximum time that can be returned by calls to Elapsed().
+ base::TimeDelta max_time_;
+
+ // Duration of the media.
+ base::TimeDelta duration_;
+
+ DISALLOW_COPY_AND_ASSIGN(Clock);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_CLOCK_H_
diff --git a/chromium/media/base/clock_unittest.cc b/chromium/media/base/clock_unittest.cc
new file mode 100644
index 00000000000..3bf05996c62
--- /dev/null
+++ b/chromium/media/base/clock_unittest.cc
@@ -0,0 +1,253 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/compiler_specific.h"
+#include "base/logging.h"
+#include "base/test/simple_test_tick_clock.h"
+#include "base/time/clock.h"
+#include "media/base/clock.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+using ::testing::InSequence;
+using ::testing::Return;
+using ::testing::StrictMock;
+
+namespace base {
+
+// Provide a stream output operator so we can use EXPECT_EQ(...) with TimeDelta.
+//
+// TODO(scherkus): move this into the testing package.
+static std::ostream& operator<<(std::ostream& stream, const TimeDelta& time) {
+ return (stream << time.ToInternalValue());
+}
+
+}  // namespace base
+
+namespace media {
+
+static const int kDurationInSeconds = 120;
+
+class ClockTest : public ::testing::Test {
+ public:
+ ClockTest() : clock_(&test_tick_clock_) {
+ SetDuration();
+ }
+
+ protected:
+ void SetDuration() {
+ const base::TimeDelta kDuration =
+ base::TimeDelta::FromSeconds(kDurationInSeconds);
+ clock_.SetDuration(kDuration);
+ EXPECT_EQ(kDuration, clock_.Duration());
+ }
+
+ void AdvanceSystemTime(base::TimeDelta delta) {
+ test_tick_clock_.Advance(delta);
+ }
+
+ base::SimpleTestTickClock test_tick_clock_;
+ Clock clock_;
+ base::TimeDelta time_elapsed_;
+};
+
+TEST_F(ClockTest, Created) {
+ const base::TimeDelta kExpected = base::TimeDelta::FromSeconds(0);
+ EXPECT_EQ(kExpected, clock_.Elapsed());
+}
+
+TEST_F(ClockTest, Play_NormalSpeed) {
+ const base::TimeDelta kZero;
+ const base::TimeDelta kTimeToAdvance = base::TimeDelta::FromSeconds(2);
+
+ EXPECT_EQ(kZero, clock_.Play());
+ AdvanceSystemTime(kTimeToAdvance);
+ EXPECT_EQ(kTimeToAdvance, clock_.Elapsed());
+}
+
+TEST_F(ClockTest, Play_DoubleSpeed) {
+ const base::TimeDelta kZero;
+ const base::TimeDelta kTimeToAdvance = base::TimeDelta::FromSeconds(5);
+
+ clock_.SetPlaybackRate(2.0f);
+ EXPECT_EQ(kZero, clock_.Play());
+ AdvanceSystemTime(kTimeToAdvance);
+ EXPECT_EQ(2 * kTimeToAdvance, clock_.Elapsed());
+}
+
+TEST_F(ClockTest, Play_HalfSpeed) {
+ const base::TimeDelta kZero;
+ const base::TimeDelta kTimeToAdvance = base::TimeDelta::FromSeconds(4);
+
+ clock_.SetPlaybackRate(0.5f);
+ EXPECT_EQ(kZero, clock_.Play());
+ AdvanceSystemTime(kTimeToAdvance);
+ EXPECT_EQ(kTimeToAdvance / 2, clock_.Elapsed());
+}
+
+TEST_F(ClockTest, Play_ZeroSpeed) {
+ // We'll play for 2 seconds at normal speed, 4 seconds at zero speed, and 8
+ // seconds at normal speed.
+ const base::TimeDelta kZero;
+ const base::TimeDelta kPlayDuration1 = base::TimeDelta::FromSeconds(2);
+ const base::TimeDelta kPlayDuration2 = base::TimeDelta::FromSeconds(4);
+ const base::TimeDelta kPlayDuration3 = base::TimeDelta::FromSeconds(8);
+ const base::TimeDelta kExpected = kPlayDuration1 + kPlayDuration3;
+
+ EXPECT_EQ(kZero, clock_.Play());
+
+ AdvanceSystemTime(kPlayDuration1);
+ clock_.SetPlaybackRate(0.0f);
+ AdvanceSystemTime(kPlayDuration2);
+ clock_.SetPlaybackRate(1.0f);
+ AdvanceSystemTime(kPlayDuration3);
+
+ EXPECT_EQ(kExpected, clock_.Elapsed());
+}
+
+TEST_F(ClockTest, Play_MultiSpeed) {
+ // We'll play for 2 seconds at half speed, 4 seconds at normal speed, and 8
+ // seconds at double speed.
+ const base::TimeDelta kZero;
+ const base::TimeDelta kPlayDuration1 = base::TimeDelta::FromSeconds(2);
+ const base::TimeDelta kPlayDuration2 = base::TimeDelta::FromSeconds(4);
+ const base::TimeDelta kPlayDuration3 = base::TimeDelta::FromSeconds(8);
+ const base::TimeDelta kExpected =
+ kPlayDuration1 / 2 + kPlayDuration2 + 2 * kPlayDuration3;
+
+ clock_.SetPlaybackRate(0.5f);
+ EXPECT_EQ(kZero, clock_.Play());
+ AdvanceSystemTime(kPlayDuration1);
+
+ clock_.SetPlaybackRate(1.0f);
+ AdvanceSystemTime(kPlayDuration2);
+
+ clock_.SetPlaybackRate(2.0f);
+ AdvanceSystemTime(kPlayDuration3);
+ EXPECT_EQ(kExpected, clock_.Elapsed());
+}
+
+TEST_F(ClockTest, Pause) {
+ const base::TimeDelta kZero;
+ const base::TimeDelta kPlayDuration = base::TimeDelta::FromSeconds(4);
+ const base::TimeDelta kPauseDuration = base::TimeDelta::FromSeconds(20);
+ const base::TimeDelta kExpectedFirstPause = kPlayDuration;
+ const base::TimeDelta kExpectedSecondPause = 2 * kPlayDuration;
+
+ // Play for 4 seconds.
+ EXPECT_EQ(kZero, clock_.Play());
+ AdvanceSystemTime(kPlayDuration);
+
+ // Pause for 20 seconds.
+ EXPECT_EQ(kExpectedFirstPause, clock_.Pause());
+ EXPECT_EQ(kExpectedFirstPause, clock_.Elapsed());
+ AdvanceSystemTime(kPauseDuration);
+ EXPECT_EQ(kExpectedFirstPause, clock_.Elapsed());
+
+ // Play again for 4 more seconds.
+ EXPECT_EQ(kExpectedFirstPause, clock_.Play());
+ AdvanceSystemTime(kPlayDuration);
+ EXPECT_EQ(kExpectedSecondPause, clock_.Pause());
+ EXPECT_EQ(kExpectedSecondPause, clock_.Elapsed());
+}
+
+TEST_F(ClockTest, SetTime_Paused) {
+ const base::TimeDelta kFirstTime = base::TimeDelta::FromSeconds(4);
+ const base::TimeDelta kSecondTime = base::TimeDelta::FromSeconds(16);
+
+ clock_.SetTime(kFirstTime, clock_.Duration());
+ EXPECT_EQ(kFirstTime, clock_.Elapsed());
+ clock_.SetTime(kSecondTime, clock_.Duration());
+ EXPECT_EQ(kSecondTime, clock_.Elapsed());
+}
+
+TEST_F(ClockTest, SetTime_Playing) {
+ // We'll play for 4 seconds, then set the time to 12, then play for 4 more
+ // seconds.
+ const base::TimeDelta kZero;
+ const base::TimeDelta kPlayDuration = base::TimeDelta::FromSeconds(4);
+ const base::TimeDelta kUpdatedTime = base::TimeDelta::FromSeconds(12);
+ const base::TimeDelta kExpected = kUpdatedTime + kPlayDuration;
+
+ EXPECT_EQ(kZero, clock_.Play());
+ AdvanceSystemTime(kPlayDuration);
+
+ clock_.SetTime(kUpdatedTime, clock_.Duration());
+ AdvanceSystemTime(kPlayDuration);
+ EXPECT_EQ(kExpected, clock_.Elapsed());
+}
+
+TEST_F(ClockTest, CapAtMediaDuration_Paused) {
+ const base::TimeDelta kDuration =
+ base::TimeDelta::FromSeconds(kDurationInSeconds);
+ const base::TimeDelta kTimeOverDuration =
+ base::TimeDelta::FromSeconds(kDurationInSeconds + 4);
+
+ // Elapsed time should always be capped at the duration of the media.
+ clock_.SetTime(kTimeOverDuration, kTimeOverDuration);
+ EXPECT_EQ(kDuration, clock_.Elapsed());
+}
+
+TEST_F(ClockTest, CapAtMediaDuration_Playing) {
+ const base::TimeDelta kZero;
+ const base::TimeDelta kDuration =
+ base::TimeDelta::FromSeconds(kDurationInSeconds);
+ const base::TimeDelta kTimeOverDuration =
+ base::TimeDelta::FromSeconds(kDurationInSeconds + 4);
+
+ // Play for twice as long as the duration of the media.
+ EXPECT_EQ(kZero, clock_.Play());
+ AdvanceSystemTime(2 * kDuration);
+ EXPECT_EQ(kDuration, clock_.Elapsed());
+
+ // Manually set the time past the duration.
+ clock_.SetTime(kTimeOverDuration, kTimeOverDuration);
+ EXPECT_EQ(kDuration, clock_.Elapsed());
+}
+
+TEST_F(ClockTest, SetMaxTime) {
+ const base::TimeDelta kZero;
+ const base::TimeDelta kTimeInterval = base::TimeDelta::FromSeconds(4);
+ const base::TimeDelta kMaxTime = base::TimeDelta::FromSeconds(6);
+
+ EXPECT_EQ(kZero, clock_.Play());
+ clock_.SetMaxTime(kMaxTime);
+ AdvanceSystemTime(kTimeInterval);
+ EXPECT_EQ(kTimeInterval, clock_.Elapsed());
+
+ AdvanceSystemTime(kTimeInterval);
+ EXPECT_EQ(kMaxTime, clock_.Elapsed());
+
+ AdvanceSystemTime(kTimeInterval);
+ EXPECT_EQ(kMaxTime, clock_.Elapsed());
+}
+
+TEST_F(ClockTest, SetMaxTime_MultipleTimes) {
+ const base::TimeDelta kZero;
+ const base::TimeDelta kTimeInterval = base::TimeDelta::FromSeconds(4);
+ const base::TimeDelta kMaxTime1 = base::TimeDelta::FromSeconds(6);
+ const base::TimeDelta kMaxTime2 = base::TimeDelta::FromSeconds(12);
+
+ EXPECT_EQ(kZero, clock_.Play());
+ clock_.SetMaxTime(clock_.Duration());
+ AdvanceSystemTime(kTimeInterval);
+ EXPECT_EQ(kTimeInterval, clock_.Elapsed());
+
+ clock_.SetMaxTime(kMaxTime1);
+ AdvanceSystemTime(kTimeInterval);
+ EXPECT_EQ(kMaxTime1, clock_.Elapsed());
+
+ AdvanceSystemTime(kTimeInterval);
+ EXPECT_EQ(kMaxTime1, clock_.Elapsed());
+
+ clock_.SetMaxTime(kMaxTime2);
+ EXPECT_EQ(kMaxTime1, clock_.Elapsed());
+
+ AdvanceSystemTime(kTimeInterval);
+ EXPECT_EQ(kMaxTime1 + kTimeInterval, clock_.Elapsed());
+
+ AdvanceSystemTime(kTimeInterval);
+ EXPECT_EQ(kMaxTime2, clock_.Elapsed());
+}
+
+} // namespace media
diff --git a/chromium/media/base/container_names.cc b/chromium/media/base/container_names.cc
new file mode 100644
index 00000000000..f062929d54b
--- /dev/null
+++ b/chromium/media/base/container_names.cc
@@ -0,0 +1,1671 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/container_names.h"
+
+#include <cctype>
+#include <limits>
+
+#include "base/basictypes.h"
+#include "base/logging.h"
+#include "media/base/bit_reader.h"
+
+namespace media {
+
+namespace container_names {
+
+#define TAG(a, b, c, d) \
+ ((static_cast<uint8>(a) << 24) | (static_cast<uint8>(b) << 16) | \
+ (static_cast<uint8>(c) << 8) | (static_cast<uint8>(d)))
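+// For example, TAG('c', 'a', 'f', 'f') evaluates to 0x63616666, the same
+// big-endian value Read32() below produces for the bytes "caff".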
+
+#define RCHECK(x) \
+ do { \
+ if (!(x)) \
+ return false; \
+ } while (0)
+
+#define UTF8_BYTE_ORDER_MARK "\xef\xbb\xbf"
+
+// Helper function to read 2 bytes (16 bits, big endian) from a buffer.
+static int Read16(const uint8* p) {
+ return p[0] << 8 | p[1];
+}
+
+// Helper function to read 3 bytes (24 bits, big endian) from a buffer.
+static uint32 Read24(const uint8* p) {
+ return p[0] << 16 | p[1] << 8 | p[2];
+}
+
+// Helper function to read 4 bytes (32 bits, big endian) from a buffer.
+static uint32 Read32(const uint8* p) {
+ return p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3];
+}
+
+// Helper function to read 4 bytes (32 bits, little endian) from a buffer.
+static uint32 Read32LE(const uint8* p) {
+ return p[3] << 24 | p[2] << 16 | p[1] << 8 | p[0];
+}
+
+// Helper function to do buffer comparisons with a string without going off the
+// end of the buffer.
+static bool StartsWith(const uint8* buffer,
+ size_t buffer_size,
+ const char* prefix) {
+ size_t prefix_size = strlen(prefix);
+ return (prefix_size <= buffer_size &&
+ memcmp(buffer, prefix, prefix_size) == 0);
+}
+
+// Helper function to do buffer comparisons with another buffer (to allow for
+// embedded \0 in the comparison) without going off the end of the buffer.
+static bool StartsWith(const uint8* buffer,
+ size_t buffer_size,
+ const uint8* prefix,
+ size_t prefix_size) {
+ return (prefix_size <= buffer_size &&
+ memcmp(buffer, prefix, prefix_size) == 0);
+}
+
+// Helper function to read up to 64 bits from a bit stream.
+static uint64 ReadBits(BitReader* reader, int num_bits) {
+ DCHECK_GE(reader->bits_available(), num_bits);
+ DCHECK((num_bits > 0) && (num_bits <= 64));
+ uint64 value;
+ reader->ReadBits(num_bits, &value);
+ return value;
+}
+
+const int kAc3FrameSizeTable[38][3] = {
+ { 128, 138, 192 }, { 128, 140, 192 }, { 160, 174, 240 }, { 160, 176, 240 },
+ { 192, 208, 288 }, { 192, 210, 288 }, { 224, 242, 336 }, { 224, 244, 336 },
+ { 256, 278, 384 }, { 256, 280, 384 }, { 320, 348, 480 }, { 320, 350, 480 },
+ { 384, 416, 576 }, { 384, 418, 576 }, { 448, 486, 672 }, { 448, 488, 672 },
+ { 512, 556, 768 }, { 512, 558, 768 }, { 640, 696, 960 }, { 640, 698, 960 },
+ { 768, 834, 1152 }, { 768, 836, 1152 }, { 896, 974, 1344 },
+ { 896, 976, 1344 }, { 1024, 1114, 1536 }, { 1024, 1116, 1536 },
+ { 1280, 1392, 1920 }, { 1280, 1394, 1920 }, { 1536, 1670, 2304 },
+ { 1536, 1672, 2304 }, { 1792, 1950, 2688 }, { 1792, 1952, 2688 },
+ { 2048, 2228, 3072 }, { 2048, 2230, 3072 }, { 2304, 2506, 3456 },
+ { 2304, 2508, 3456 }, { 2560, 2768, 3840 }, { 2560, 2770, 3840 }
+};
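+// Rows are indexed by frmsizecod and columns by fscod (48, 44.1, and 32 kHz
+// per A/52); values are syncframe sizes in bytes, used by CheckAc3() below to
+// advance |offset|.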
+
+// Checks for an ADTS AAC container.
+static bool CheckAac(const uint8* buffer, int buffer_size) {
+ // Audio Data Transport Stream (ADTS) header is 7 or 9 bytes
+ // (from http://wiki.multimedia.cx/index.php?title=ADTS)
+ RCHECK(buffer_size > 6);
+
+ int offset = 0;
+ while (offset + 6 < buffer_size) {
+ BitReader reader(buffer + offset, 6);
+
+ // Syncword must be 0xfff.
+ RCHECK(ReadBits(&reader, 12) == 0xfff);
+
+ // Skip MPEG version.
+ reader.SkipBits(1);
+
+ // Layer is always 0.
+ RCHECK(ReadBits(&reader, 2) == 0);
+
+ // Skip protection + profile.
+ reader.SkipBits(1 + 2);
+
+ // Check sampling frequency index.
+ RCHECK(ReadBits(&reader, 4) != 15); // Forbidden.
+
+ // Skip private stream, channel configuration, originality, home,
+ // copyrighted stream, and copyright_start.
+ reader.SkipBits(1 + 3 + 1 + 1 + 1 + 1);
+
+ // Get frame length (includes header).
+ int size = ReadBits(&reader, 13);
+ RCHECK(size > 0);
+ offset += size;
+ }
+ return true;
+}
+
+const uint16 kAc3SyncWord = 0x0b77;
+
+// Checks for an AC3 container.
+static bool CheckAc3(const uint8* buffer, int buffer_size) {
+ // Reference: ATSC Standard: Digital Audio Compression (AC-3, E-AC-3)
+ // Doc. A/52:2012
+ // (http://www.atsc.org/cms/standards/A52-2012(12-17).pdf)
+
+ // AC3 container looks like syncinfo | bsi | audblk * 6 | aux | check.
+ RCHECK(buffer_size > 6);
+
+ int offset = 0;
+ while (offset + 6 < buffer_size) {
+ BitReader reader(buffer + offset, 6);
+
+ // Check syncinfo.
+ RCHECK(ReadBits(&reader, 16) == kAc3SyncWord);
+
+ // Skip crc1.
+ reader.SkipBits(16);
+
+ // Verify fscod.
+ int sample_rate_code = ReadBits(&reader, 2);
+ RCHECK(sample_rate_code != 3); // Reserved.
+
+ // Verify frmsizecod.
+ int frame_size_code = ReadBits(&reader, 6);
+ RCHECK(frame_size_code < 38); // Undefined.
+
+ // Verify bsid.
+ RCHECK(ReadBits(&reader, 5) < 10); // Normally 8 or 6, 16 used by EAC3.
+
+ offset += kAc3FrameSizeTable[frame_size_code][sample_rate_code];
+ }
+ return true;
+}
+
+// Checks for an EAC3 container (very similar to AC3)
+static bool CheckEac3(const uint8* buffer, int buffer_size) {
+ // Reference: ATSC Standard: Digital Audio Compression (AC-3, E-AC-3)
+ // Doc. A/52:2012
+ // (http://www.atsc.org/cms/standards/A52-2012(12-17).pdf)
+
+ // EAC3 container looks like syncinfo | bsi | audfrm | audblk* | aux | check.
+ RCHECK(buffer_size > 6);
+
+ int offset = 0;
+ while (offset + 6 < buffer_size) {
+ BitReader reader(buffer + offset, 6);
+
+ // Check syncinfo.
+ RCHECK(ReadBits(&reader, 16) == kAc3SyncWord);
+
+ // Verify strmtyp.
+ RCHECK(ReadBits(&reader, 2) != 3);
+
+ // Skip substreamid.
+ reader.SkipBits(3);
+
+ // Get frmsize. Include syncinfo size and convert to bytes.
+ int frame_size = (ReadBits(&reader, 11) + 1) * 2;
+ RCHECK(frame_size >= 7);
+
+ // Skip fscod, fscod2, acmod, and lfeon.
+ reader.SkipBits(2 + 2 + 3 + 1);
+
+ // Verify bsid.
+ int bit_stream_id = ReadBits(&reader, 5);
+ RCHECK(bit_stream_id >= 11 && bit_stream_id <= 16);
+
+ offset += frame_size;
+ }
+ return true;
+}
+
+// Additional checks for a BINK container.
+static bool CheckBink(const uint8* buffer, int buffer_size) {
+ // Reference: http://wiki.multimedia.cx/index.php?title=Bink_Container
+ RCHECK(buffer_size >= 44);
+
+ // Verify number of frames specified.
+ RCHECK(Read32LE(buffer + 8) > 0);
+
+ // Verify width in range.
+ int width = Read32LE(buffer + 20);
+ RCHECK(width > 0 && width <= 32767);
+
+ // Verify height in range.
+ int height = Read32LE(buffer + 24);
+ RCHECK(height > 0 && height <= 32767);
+
+ // Verify frames per second specified.
+ RCHECK(Read32LE(buffer + 28) > 0);
+
+ // Verify video frames per second specified.
+ RCHECK(Read32LE(buffer + 32) > 0);
+
+ // Number of audio tracks must be 256 or less.
+ return (Read32LE(buffer + 40) <= 256);
+}
+
+// Additional checks for a CAF container.
+static bool CheckCaf(const uint8* buffer, int buffer_size) {
+ // Reference: Apple Core Audio Format Specification 1.0
+ // (https://developer.apple.com/library/mac/#documentation/MusicAudio/Reference/CAFSpec/CAF_spec/CAF_spec.html)
+ RCHECK(buffer_size >= 52);
+ BitReader reader(buffer, buffer_size);
+
+ // mFileType should be "caff".
+ RCHECK(ReadBits(&reader, 32) == TAG('c', 'a', 'f', 'f'));
+
+ // mFileVersion should be 1.
+ RCHECK(ReadBits(&reader, 16) == 1);
+
+ // Skip mFileFlags.
+ reader.SkipBits(16);
+
+  // First chunk should be Audio Description chunk, size 32.
+ RCHECK(ReadBits(&reader, 32) == TAG('d', 'e', 's', 'c'));
+ RCHECK(ReadBits(&reader, 64) == 32);
+
+ // CAFAudioFormat.mSampleRate(float64) not 0
+ RCHECK(ReadBits(&reader, 64) != 0);
+
+ // CAFAudioFormat.mFormatID not 0
+ RCHECK(ReadBits(&reader, 32) != 0);
+
+ // Skip CAFAudioFormat.mBytesPerPacket and mFramesPerPacket.
+ reader.SkipBits(32 + 32);
+
+ // CAFAudioFormat.mChannelsPerFrame not 0
+ RCHECK(ReadBits(&reader, 32) != 0);
+ return true;
+}
+
+static bool kSamplingFrequencyValid[16] = { false, true, true, true, false,
+ false, true, true, true, false,
+ false, true, true, true, false,
+ false };
+static bool kExtAudioIdValid[8] = { true, false, true, false, false, false,
+ true, false };
+
+// Additional checks for a DTS container.
+static bool CheckDts(const uint8* buffer, int buffer_size) {
+ // Reference: ETSI TS 102 114 V1.3.1 (2011-08)
+ // (http://www.etsi.org/deliver/etsi_ts/102100_102199/102114/01.03.01_60/ts_102114v010301p.pdf)
+ RCHECK(buffer_size > 11);
+
+ int offset = 0;
+ while (offset + 11 < buffer_size) {
+ BitReader reader(buffer + offset, 11);
+
+ // Verify sync word.
+ RCHECK(ReadBits(&reader, 32) == 0x7ffe8001);
+
+ // Skip frame type and deficit sample count.
+ reader.SkipBits(1 + 5);
+
+ // Verify CRC present flag.
+ RCHECK(ReadBits(&reader, 1) == 0); // CPF must be 0.
+
+ // Verify number of PCM sample blocks.
+ RCHECK(ReadBits(&reader, 7) >= 5);
+
+ // Verify primary frame byte size.
+ int frame_size = ReadBits(&reader, 14);
+ RCHECK(frame_size >= 95);
+
+ // Skip audio channel arrangement.
+ reader.SkipBits(6);
+
+ // Verify core audio sampling frequency is an allowed value.
+ RCHECK(kSamplingFrequencyValid[ReadBits(&reader, 4)]);
+
+ // Verify transmission bit rate is valid.
+ RCHECK(ReadBits(&reader, 5) <= 25);
+
+ // Verify reserved field is 0.
+ RCHECK(ReadBits(&reader, 1) == 0);
+
+ // Skip dynamic range flag, time stamp flag, auxiliary data flag, and HDCD.
+ reader.SkipBits(1 + 1 + 1 + 1);
+
+ // Verify extension audio descriptor flag is an allowed value.
+ RCHECK(kExtAudioIdValid[ReadBits(&reader, 3)]);
+
+ // Skip extended coding flag and audio sync word insertion flag.
+ reader.SkipBits(1 + 1);
+
+ // Verify low frequency effects flag is an allowed value.
+ RCHECK(ReadBits(&reader, 2) != 3);
+
+ offset += frame_size + 1;
+ }
+ return true;
+}
+
+// Checks for a DV container.
+static bool CheckDV(const uint8* buffer, int buffer_size) {
+ // Reference: SMPTE 314M (Annex A has differences with IEC 61834).
+ // (http://standards.smpte.org/content/978-1-61482-454-1/st-314-2005/SEC1.body.pdf)
+ RCHECK(buffer_size > 11);
+
+ int offset = 0;
+ int current_sequence_number = -1;
+ int last_block_number[6];
+ while (offset + 11 < buffer_size) {
+ BitReader reader(buffer + offset, 11);
+
+ // Decode ID data. Sections 5, 6, and 7 are reserved.
+ int section = ReadBits(&reader, 3);
+ RCHECK(section < 5);
+
+ // Next bit must be 1.
+ RCHECK(ReadBits(&reader, 1) == 1);
+
+ // Skip arbitrary bits.
+ reader.SkipBits(4);
+
+ int sequence_number = ReadBits(&reader, 4);
+
+ // Skip FSC.
+ reader.SkipBits(1);
+
+ // Next 3 bits must be 1.
+ RCHECK(ReadBits(&reader, 3) == 7);
+
+ int block_number = ReadBits(&reader, 8);
+
+ if (section == 0) { // Header.
+ // Validate the reserved bits in the next 8 bytes.
+ reader.SkipBits(1);
+ RCHECK(ReadBits(&reader, 1) == 0);
+ RCHECK(ReadBits(&reader, 11) == 0x7ff);
+ reader.SkipBits(4);
+ RCHECK(ReadBits(&reader, 4) == 0xf);
+ reader.SkipBits(4);
+ RCHECK(ReadBits(&reader, 4) == 0xf);
+ reader.SkipBits(4);
+ RCHECK(ReadBits(&reader, 4) == 0xf);
+ reader.SkipBits(3);
+ RCHECK(ReadBits(&reader, 24) == 0xffffff);
+ current_sequence_number = sequence_number;
+ for (size_t i = 0; i < arraysize(last_block_number); ++i)
+ last_block_number[i] = -1;
+ } else {
+ // Sequence number must match (this will also fail if no header seen).
+ RCHECK(sequence_number == current_sequence_number);
+ // Block number should be increasing.
+ RCHECK(block_number > last_block_number[section]);
+ last_block_number[section] = block_number;
+ }
+
+ // Move to next block.
+ offset += 80;
+ }
+ return true;
+}
+
+
+// Checks for a GSM container.
+static bool CheckGsm(const uint8* buffer, int buffer_size) {
+ // Reference: ETSI EN 300 961 V8.1.1
+ // (http://www.etsi.org/deliver/etsi_en/300900_300999/300961/08.01.01_60/en_300961v080101p.pdf)
+ // also http://tools.ietf.org/html/rfc3551#page-24
+ // GSM files have a 33 byte block, only first 4 bits are fixed.
+ RCHECK(buffer_size >= 1024); // Need enough data to do a decent check.
+
+ int offset = 0;
+ while (offset < buffer_size) {
+ // First 4 bits of each block are xD.
+ RCHECK((buffer[offset] & 0xf0) == 0xd0);
+ offset += 33;
+ }
+ return true;
+}
+
+// Advance to the first set of |num_bits| bits that match |start_code|. |offset|
+// is the current location in the buffer, and is updated. |bytes_needed| is the
+// number of bytes that must remain in the buffer when |start_code| is found.
+// Returns true if start_code found (and enough space in the buffer after it),
+// false otherwise.
+static bool AdvanceToStartCode(const uint8* buffer,
+ int buffer_size,
+ int* offset,
+ int bytes_needed,
+ int num_bits,
+ uint32 start_code) {
+ DCHECK_GE(bytes_needed, 3);
+ DCHECK_LE(num_bits, 24); // Only supports up to 24 bits.
+
+ // Create a mask to isolate |num_bits| bits, once shifted over.
+ uint32 bits_to_shift = 24 - num_bits;
+ uint32 mask = (1 << num_bits) - 1;
+ while (*offset + bytes_needed < buffer_size) {
+ uint32 next = Read24(buffer + *offset);
+ if (((next >> bits_to_shift) & mask) == start_code)
+ return true;
+ ++(*offset);
+ }
+ return false;
+}
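+// For example, CheckH261() below passes num_bits == 20 and start_code == 0x10
+// (the picture_start_code), so bits_to_shift == 4 and mask == 0xfffff: the top
+// 20 bits of each 24-bit read are compared against 0x10.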
+
+// Checks for an H.261 container.
+static bool CheckH261(const uint8* buffer, int buffer_size) {
+ // Reference: ITU-T Recommendation H.261 (03/1993)
+ // (http://www.itu.int/rec/T-REC-H.261-199303-I/en)
+ RCHECK(buffer_size > 16);
+
+ int offset = 0;
+ bool seen_start_code = false;
+ while (true) {
+ // Advance to picture_start_code, if there is one.
+ if (!AdvanceToStartCode(buffer, buffer_size, &offset, 4, 20, 0x10)) {
+ // No start code found (or off end of buffer), so success if
+ // there was at least one valid header.
+ return seen_start_code;
+ }
+
+ // Now verify the block. AdvanceToStartCode() made sure that there are
+ // at least 4 bytes remaining in the buffer.
+ BitReader reader(buffer + offset, buffer_size - offset);
+ RCHECK(ReadBits(&reader, 20) == 0x10);
+
+ // Skip the temporal reference and PTYPE.
+ reader.SkipBits(5 + 6);
+
+ // Skip any extra insertion information. Since this is open-ended, if we run
+ // out of bits assume that the buffer is correctly formatted.
+ int extra = ReadBits(&reader, 1);
+ while (extra == 1) {
+ if (!reader.SkipBits(8))
+ return seen_start_code;
+ if (!reader.ReadBits(1, &extra))
+ return seen_start_code;
+ }
+
+ // Next should be a Group of Blocks start code. Again, if we run out of
+ // bits, then assume that the buffer up to here is correct, and the buffer
+ // just happened to end in the middle of a header.
+ int next;
+ if (!reader.ReadBits(16, &next))
+ return seen_start_code;
+ RCHECK(next == 1);
+
+ // Move to the next block.
+ seen_start_code = true;
+ offset += 4;
+ }
+}
+
+// Checks for an H.263 container.
+static bool CheckH263(const uint8* buffer, int buffer_size) {
+ // Reference: ITU-T Recommendation H.263 (01/2005)
+ // (http://www.itu.int/rec/T-REC-H.263-200501-I/en)
+ // header is PSC(22b) + TR(8b) + PTYPE(8+b).
+ RCHECK(buffer_size > 16);
+
+ int offset = 0;
+ bool seen_start_code = false;
+ while (true) {
+ // Advance to picture_start_code, if there is one.
+ if (!AdvanceToStartCode(buffer, buffer_size, &offset, 9, 22, 0x20)) {
+ // No start code found (or off end of buffer), so success if
+ // there was at least one valid header.
+ return seen_start_code;
+ }
+
+ // Now verify the block. AdvanceToStartCode() made sure that there are
+ // at least 9 bytes remaining in the buffer.
+ BitReader reader(buffer + offset, 9);
+ RCHECK(ReadBits(&reader, 22) == 0x20);
+
+ // Skip the temporal reference.
+ reader.SkipBits(8);
+
+ // Verify that the first 2 bits of PTYPE are 10b.
+ RCHECK(ReadBits(&reader, 2) == 2);
+
+ // Skip the split screen indicator, document camera indicator, and full
+ // picture freeze release.
+ reader.SkipBits(1 + 1 + 1);
+
+ // Verify Source Format.
+ int format = ReadBits(&reader, 3);
+ RCHECK(format != 0 && format != 6); // Forbidden or reserved.
+
+ if (format == 7) {
+ // Verify full extended PTYPE.
+ int ufep = ReadBits(&reader, 3);
+ if (ufep == 1) {
+ // Verify the optional part of PLUSPTYPE.
+ format = ReadBits(&reader, 3);
+ RCHECK(format != 0 && format != 7); // Reserved.
+ reader.SkipBits(11);
+        // Next 4 bits should be b1000; other values are not allowed.
+        RCHECK(ReadBits(&reader, 4) == 8);
+ } else {
+ RCHECK(ufep == 0); // Only 0 and 1 allowed.
+ }
+
+ // Verify picture type code is not a reserved value.
+ int picture_type_code = ReadBits(&reader, 3);
+ RCHECK(picture_type_code != 6 && picture_type_code != 7); // Reserved.
+
+ // Skip picture resampling mode, reduced resolution mode,
+ // and rounding type.
+ reader.SkipBits(1 + 1 + 1);
+
+      // Next 3 bits should be b001; other values are not allowed.
+      RCHECK(ReadBits(&reader, 3) == 1);
+ }
+
+ // Move to the next block.
+ seen_start_code = true;
+ offset += 9;
+ }
+}
+
+// Checks for an H.264 container.
+static bool CheckH264(const uint8* buffer, int buffer_size) {
+ // Reference: ITU-T Recommendation H.264 (01/2012)
+ // (http://www.itu.int/rec/T-REC-H.264)
+ // Section B.1: Byte stream NAL unit syntax and semantics.
+ RCHECK(buffer_size > 4);
+
+ int offset = 0;
+ int parameter_count = 0;
+ while (true) {
+ // Advance to picture_start_code, if there is one.
+ if (!AdvanceToStartCode(buffer, buffer_size, &offset, 4, 24, 1)) {
+ // No start code found (or off end of buffer), so success if
+ // there was at least one valid header.
+ return parameter_count > 0;
+ }
+
+ // Now verify the block. AdvanceToStartCode() made sure that there are
+ // at least 4 bytes remaining in the buffer.
+ BitReader reader(buffer + offset, 4);
+ RCHECK(ReadBits(&reader, 24) == 1);
+
+ // Verify forbidden_zero_bit.
+ RCHECK(ReadBits(&reader, 1) == 0);
+
+ // Extract nal_ref_idc and nal_unit_type.
+ int nal_ref_idc = ReadBits(&reader, 2);
+ int nal_unit_type = ReadBits(&reader, 5);
+
+ switch (nal_unit_type) {
+ case 5: // Coded slice of an IDR picture.
+ RCHECK(nal_ref_idc != 0);
+ break;
+ case 6: // Supplemental enhancement information (SEI).
+ case 9: // Access unit delimiter.
+ case 10: // End of sequence.
+ case 11: // End of stream.
+ case 12: // Filler data.
+ RCHECK(nal_ref_idc == 0);
+ break;
+ case 7: // Sequence parameter set.
+ case 8: // Picture parameter set.
+ ++parameter_count;
+ break;
+ }
+
+ // Skip the current start_code_prefix and move to the next.
+ offset += 4;
+ }
+}
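+
+// Worked example (illustrative only): the 5-byte buffer
+//   0x00 0x00 0x01 0x67 0x42
+// contains one Annex B start code followed by the NAL header byte 0x67
+// (forbidden_zero_bit = 0, nal_ref_idc = 3, nal_unit_type = 7, a sequence
+// parameter set), so parameter_count becomes 1 and CheckH264() returns true
+// once no further start code is found.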
+
+static const char kHlsSignature[] = "#EXTM3U";
+static const char kHls1[] = "#EXT-X-STREAM-INF:";
+static const char kHls2[] = "#EXT-X-TARGETDURATION:";
+static const char kHls3[] = "#EXT-X-MEDIA-SEQUENCE:";
+
+// Additional checks for an HLS container.
+static bool CheckHls(const uint8* buffer, int buffer_size) {
+  // HLS is simply a playlist used for Apple HTTP Live Streaming.
+ // Reference: Apple HTTP Live Streaming Overview
+ // (http://goo.gl/MIwxj)
+
+ if (StartsWith(buffer, buffer_size, kHlsSignature)) {
+ // Need to find "#EXT-X-STREAM-INF:", "#EXT-X-TARGETDURATION:", or
+ // "#EXT-X-MEDIA-SEQUENCE:" somewhere in the buffer. Other playlists (like
+ // WinAmp) only have additional lines with #EXTINF
+ // (http://en.wikipedia.org/wiki/M3U).
+ int offset = strlen(kHlsSignature);
+ while (offset < buffer_size) {
+ if (buffer[offset] == '#') {
+ if (StartsWith(buffer + offset, buffer_size - offset, kHls1) ||
+ StartsWith(buffer + offset, buffer_size - offset, kHls2) ||
+ StartsWith(buffer + offset, buffer_size - offset, kHls3)) {
+ return true;
+ }
+ }
+ ++offset;
+ }
+ }
+ return false;
+}
+
+// Checks for an MJPEG stream.
+static bool CheckMJpeg(const uint8* buffer, int buffer_size) {
+ // Reference: ISO/IEC 10918-1 : 1993(E), Annex B
+ // (http://www.w3.org/Graphics/JPEG/itu-t81.pdf)
+ RCHECK(buffer_size >= 16);
+
+ int offset = 0;
+ int last_restart = -1;
+ int num_codes = 0;
+ while (offset + 5 < buffer_size) {
+ // Marker codes are always a two byte code with the first byte xFF.
+ RCHECK(buffer[offset] == 0xff);
+ uint8 code = buffer[offset + 1];
+ RCHECK(code >= 0xc0 || code == 1);
+
+ // Skip sequences of xFF.
+ if (code == 0xff) {
+ ++offset;
+ continue;
+ }
+
+ // Success if the next marker code is EOI (end of image)
+ if (code == 0xd9)
+ return true;
+
+ // Check remaining codes.
+ if (code == 0xd8 || code == 1) {
+ // SOI (start of image) / TEM (private use). No other data with header.
+ offset += 2;
+ } else if (code >= 0xd0 && code <= 0xd7) {
+ // RST (restart) codes must be in sequence. No other data with header.
+ int restart = code & 0x07;
+ if (last_restart >= 0)
+ RCHECK(restart == (last_restart + 1) % 8);
+ last_restart = restart;
+ offset += 2;
+ } else {
+ // All remaining marker codes are followed by a length of the header.
+ int length = Read16(buffer + offset + 2) + 2;
+
+ // Special handling of SOS (start of scan) marker since the entropy
+ // coded data follows the SOS. Any xFF byte in the data block must be
+ // followed by x00 in the data.
+ if (code == 0xda) {
+ int number_components = buffer[offset + 4];
+ RCHECK(length == 8 + 2 * number_components);
+
+ // Advance to the next marker.
+ offset += length;
+ while (offset + 2 < buffer_size) {
+ if (buffer[offset] == 0xff && buffer[offset + 1] != 0)
+ break;
+ ++offset;
+ }
+ } else {
+ // Skip over the marker data for the other marker codes.
+ offset += length;
+ }
+ }
+ ++num_codes;
+ }
+ return (num_codes > 1);
+}
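+
+// Worked example (illustrative only): a 16-byte buffer laid out as
+//   ff d8 | ff fe 00 04 xx xx | ff d9 | six 0x00 pad bytes
+// parses as SOI, then a COM (comment) marker whose length field 0x0004
+// covers itself plus two payload bytes, then EOI, at which point
+// CheckMJpeg() returns true.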
+
+enum Mpeg2StartCodes {
+ PROGRAM_END_CODE = 0xb9,
+ PACK_START_CODE = 0xba
+};
+
+// Checks for an MPEG2 Program Stream.
+static bool CheckMpeg2ProgramStream(const uint8* buffer, int buffer_size) {
+ // Reference: ISO/IEC 13818-1 : 2000 (E) / ITU-T Rec. H.222.0 (2000 E).
+ RCHECK(buffer_size > 14);
+
+ int offset = 0;
+ while (offset + 14 < buffer_size) {
+ BitReader reader(buffer + offset, 14);
+
+ // Must start with pack_start_code.
+ RCHECK(ReadBits(&reader, 24) == 1);
+ RCHECK(ReadBits(&reader, 8) == PACK_START_CODE);
+
+ // Determine MPEG version (MPEG1 has b0010, while MPEG2 has b01).
+ int mpeg_version = ReadBits(&reader, 2);
+ if (mpeg_version == 0) {
+      // MPEG1 pack header (12 bytes total, including the start code).
+      // Validate the rest of the version code.
+ RCHECK(ReadBits(&reader, 2) == 2);
+ } else {
+ RCHECK(mpeg_version == 1);
+ }
+
+ // Skip system_clock_reference_base [32..30].
+ reader.SkipBits(3);
+
+ // Verify marker bit.
+ RCHECK(ReadBits(&reader, 1) == 1);
+
+ // Skip system_clock_reference_base [29..15].
+ reader.SkipBits(15);
+
+ // Verify next marker bit.
+ RCHECK(ReadBits(&reader, 1) == 1);
+
+ // Skip system_clock_reference_base [14..0].
+ reader.SkipBits(15);
+
+ // Verify next marker bit.
+ RCHECK(ReadBits(&reader, 1) == 1);
+
+ if (mpeg_version == 0) {
+ // Verify second marker bit.
+ RCHECK(ReadBits(&reader, 1) == 1);
+
+ // Skip mux_rate.
+ reader.SkipBits(22);
+
+ // Verify next marker bit.
+ RCHECK(ReadBits(&reader, 1) == 1);
+
+ // Update offset to be after this header.
+ offset += 12;
+ } else {
+ // Must be MPEG2.
+ // Skip program_mux_rate.
+ reader.SkipBits(22);
+
+ // Verify pair of marker bits.
+ RCHECK(ReadBits(&reader, 2) == 3);
+
+ // Skip reserved.
+ reader.SkipBits(5);
+
+ // Update offset to be after this header.
+ int pack_stuffing_length = ReadBits(&reader, 3);
+ offset += 14 + pack_stuffing_length;
+ }
+
+ // Check for system headers and PES_packets.
+ while (offset + 6 < buffer_size && Read24(buffer + offset) == 1) {
+ // Next 8 bits determine stream type.
+ int stream_id = buffer[offset + 3];
+
+ // Some stream types are reserved and shouldn't occur.
+ if (mpeg_version == 0)
+ RCHECK(stream_id != 0xbc && stream_id < 0xf0);
+ else
+ RCHECK(stream_id != 0xfc && stream_id != 0xfd && stream_id != 0xfe);
+
+ // Some stream types are used for pack headers.
+ if (stream_id == PACK_START_CODE) // back to outer loop.
+ break;
+ if (stream_id == PROGRAM_END_CODE) // end of stream.
+ return true;
+
+ int pes_length = Read16(buffer + offset + 4);
+ RCHECK(pes_length > 0);
+ offset = offset + 6 + pes_length;
+ }
+ }
+  // Success: we ran off the end of the buffer and everything seen along the
+  // way validated.
+ return true;
+}
+
+const uint8 kMpeg2SyncWord = 0x47;
+
+// Checks for an MPEG2 Transport Stream.
+static bool CheckMpeg2TransportStream(const uint8* buffer, int buffer_size) {
+ // Spec: ISO/IEC 13818-1 : 2000 (E) / ITU-T Rec. H.222.0 (2000 E).
+ // Normal packet size is 188 bytes. However, some systems add various error
+  // correction data at the end, resulting in packets of length 192/204/208
+ // (https://en.wikipedia.org/wiki/MPEG_transport_stream). Determine the
+ // length with the first packet.
+ RCHECK(buffer_size >= 250); // Want more than 1 packet to check.
+
+ int offset = 0;
+ int packet_length = -1;
+ while (buffer[offset] != kMpeg2SyncWord && offset < 20) {
+ // Skip over any header in the first 20 bytes.
+ ++offset;
+ }
+
+ while (offset + 6 < buffer_size) {
+ BitReader reader(buffer + offset, 6);
+
+ // Must start with sync byte.
+ RCHECK(ReadBits(&reader, 8) == kMpeg2SyncWord);
+
+ // Skip transport_error_indicator, payload_unit_start_indicator, and
+ // transport_priority.
+ reader.SkipBits(1 + 1 + 1);
+
+ // Verify the pid is not a reserved value.
+ int pid = ReadBits(&reader, 13);
+ RCHECK(pid < 3 || pid > 15);
+
+ // Skip transport_scrambling_control.
+ reader.SkipBits(2);
+
+    // The adaptation_field_control field cannot be 0.
+ int adaptation_field_control = ReadBits(&reader, 2);
+ RCHECK(adaptation_field_control != 0);
+
+ // If there is an adaptation_field, verify it.
+ if (adaptation_field_control >= 2) {
+ // Skip continuity_counter.
+ reader.SkipBits(4);
+
+ // Get adaptation_field_length and verify it.
+ int adaptation_field_length = ReadBits(&reader, 8);
+ if (adaptation_field_control == 2)
+ RCHECK(adaptation_field_length == 183);
+ else
+ RCHECK(adaptation_field_length <= 182);
+ }
+
+ // Attempt to determine the packet length on the first packet.
+ if (packet_length < 0) {
+ if (buffer[offset + 188] == kMpeg2SyncWord)
+ packet_length = 188;
+ else if (buffer[offset + 192] == kMpeg2SyncWord)
+ packet_length = 192;
+ else if (buffer[offset + 204] == kMpeg2SyncWord)
+ packet_length = 204;
+ else
+ packet_length = 208;
+ }
+ offset += packet_length;
+ }
+ return true;
+}
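+
+// Worked example (illustrative only): a packet starting 0x47 0x40 0x11 0x10
+// has sync byte 0x47, payload_unit_start_indicator = 1, pid = 0x0011 (17,
+// outside the reserved 3..15 range) and adaptation_field_control = 1
+// (payload only), so the header checks pass and the packet length detection
+// then looks for another 0x47 at offset + 188.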
+
+enum Mpeg4StartCodes {
+ VISUAL_OBJECT_SEQUENCE_START_CODE = 0xb0,
+ VISUAL_OBJECT_SEQUENCE_END_CODE = 0xb1,
+ VISUAL_OBJECT_START_CODE = 0xb5,
+ VOP_START_CODE = 0xb6
+};
+
+// Checks for a raw MPEG4 bitstream container.
+static bool CheckMpeg4BitStream(const uint8* buffer, int buffer_size) {
+ // Defined in ISO/IEC 14496-2:2001.
+  // However, there is no length field, so simply scan for start code values.
+ // Note tags are very similar to H.264.
+ RCHECK(buffer_size > 4);
+
+ int offset = 0;
+ int sequence_start_count = 0;
+ int sequence_end_count = 0;
+ int visual_object_count = 0;
+ int vop_count = 0;
+ while (true) {
+ // Advance to start_code, if there is one.
+ if (!AdvanceToStartCode(buffer, buffer_size, &offset, 6, 24, 1)) {
+ // Not a complete sequence in memory, so return true if we've seen a
+ // visual_object_sequence_start_code and a visual_object_start_code.
+ return (sequence_start_count > 0 && visual_object_count > 0);
+ }
+
+ // Now verify the block. AdvanceToStartCode() made sure that there are
+ // at least 6 bytes remaining in the buffer.
+ BitReader reader(buffer + offset, 6);
+ RCHECK(ReadBits(&reader, 24) == 1);
+
+ int start_code = ReadBits(&reader, 8);
+ RCHECK(start_code < 0x30 || start_code > 0xaf); // 30..AF and
+ RCHECK(start_code < 0xb7 || start_code > 0xb9); // B7..B9 reserved
+
+ switch (start_code) {
+ case VISUAL_OBJECT_SEQUENCE_START_CODE: {
+ ++sequence_start_count;
+        // Verify profile is not one of the many reserved values.
+ int profile = ReadBits(&reader, 8);
+ RCHECK(profile > 0);
+ RCHECK(profile < 0x04 || profile > 0x10);
+ RCHECK(profile < 0x13 || profile > 0x20);
+ RCHECK(profile < 0x23 || profile > 0x31);
+ RCHECK(profile < 0x35 || profile > 0x41);
+ RCHECK(profile < 0x43 || profile > 0x60);
+ RCHECK(profile < 0x65 || profile > 0x70);
+ RCHECK(profile < 0x73 || profile > 0x80);
+ RCHECK(profile < 0x83 || profile > 0x90);
+ RCHECK(profile < 0x95 || profile > 0xa0);
+ RCHECK(profile < 0xa4 || profile > 0xb0);
+ RCHECK(profile < 0xb5 || profile > 0xc0);
+ RCHECK(profile < 0xc3 || profile > 0xd0);
+ RCHECK(profile < 0xe4);
+ break;
+ }
+
+ case VISUAL_OBJECT_SEQUENCE_END_CODE:
+ RCHECK(++sequence_end_count == sequence_start_count);
+ break;
+
+ case VISUAL_OBJECT_START_CODE: {
+ ++visual_object_count;
+ if (ReadBits(&reader, 1) == 1) {
+ int visual_object_verid = ReadBits(&reader, 4);
+ RCHECK(visual_object_verid > 0 && visual_object_verid < 3);
+ RCHECK(ReadBits(&reader, 3) != 0);
+ }
+ int visual_object_type = ReadBits(&reader, 4);
+ RCHECK(visual_object_type > 0 && visual_object_type < 6);
+ break;
+ }
+
+ case VOP_START_CODE:
+ RCHECK(++vop_count <= visual_object_count);
+ break;
+ }
+ // Skip this block.
+ offset += 6;
+ }
+}
+
+// Additional checks for a MOV/QuickTime/MPEG4 container.
+static bool CheckMov(const uint8* buffer, int buffer_size) {
+ // Reference: ISO/IEC 14496-12:2005(E).
+ // (http://standards.iso.org/ittf/PubliclyAvailableStandards/c061988_ISO_IEC_14496-12_2012.zip)
+ RCHECK(buffer_size > 8);
+
+ int offset = 0;
+ while (offset + 8 < buffer_size) {
+ int atomsize = Read32(buffer + offset);
+ uint32 atomtype = Read32(buffer + offset + 4);
+ // Only need to check for ones that are valid at the top level.
+ switch (atomtype) {
+ case TAG('f','t','y','p'):
+ case TAG('p','d','i','n'):
+ case TAG('m','o','o','v'):
+ case TAG('m','o','o','f'):
+ case TAG('m','f','r','a'):
+ case TAG('m','d','a','t'):
+ case TAG('f','r','e','e'):
+ case TAG('s','k','i','p'):
+ case TAG('m','e','t','a'):
+ case TAG('m','e','c','o'):
+ case TAG('s','t','y','p'):
+ case TAG('s','i','d','x'):
+ case TAG('s','s','i','x'):
+ case TAG('p','r','f','t'):
+ case TAG('b','l','o','c'):
+ break;
+ default:
+ return false;
+ }
+ if (atomsize == 1) {
+ // Indicates that the length is the next 64bits.
+ if (offset + 16 > buffer_size)
+ break;
+ if (Read32(buffer + offset + 8) != 0)
+ break; // Offset is way past buffer size.
+ atomsize = Read32(buffer + offset + 12);
+ }
+ if (atomsize <= 0)
+ break; // Indicates the last atom or length too big.
+ offset += atomsize;
+ }
+ return true;
+}
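+
+// Worked example (illustrative only): a buffer beginning
+//   0x00 0x00 0x00 0x10 'f' 't' 'y' 'p' 'i' 's' 'o' 'm' 0x00 0x00 0x00 0x00
+// yields atomsize = 16 and atomtype = TAG('f','t','y','p'), so the loop
+// advances past the single atom and CheckMov() returns true.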
+
+enum MPEGVersion {
+ VERSION_25 = 0,
+ VERSION_RESERVED,
+ VERSION_2,
+ VERSION_1
+};
+enum MPEGLayer {
+ L_RESERVED = 0,
+ LAYER_3,
+ LAYER_2,
+ LAYER_1
+};
+
+static int kSampleRateTable[4][4] = { { 11025, 12000, 8000, 0 }, // v2.5
+ { 0, 0, 0, 0 }, // not used
+ { 22050, 24000, 16000, 0 }, // v2
+ { 44100, 48000, 32000, 0 } // v1
+};
+
+static int kBitRateTableV1L1[16] = { 0, 32, 64, 96, 128, 160, 192, 224, 256,
+ 288, 320, 352, 384, 416, 448, 0 };
+static int kBitRateTableV1L2[16] = { 0, 32, 48, 56, 64, 80, 96, 112, 128, 160,
+ 192, 224, 256, 320, 384, 0 };
+static int kBitRateTableV1L3[16] = { 0, 32, 40, 48, 56, 64, 80, 96, 112, 128,
+ 160, 192, 224, 256, 320, 0 };
+static int kBitRateTableV2L1[16] = { 0, 32, 48, 56, 64, 80, 96, 112, 128, 144,
+ 160, 176, 192, 224, 256, 0 };
+static int kBitRateTableV2L23[16] = { 0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96,
+ 112, 128, 144, 160, 0 };
+
+static bool ValidMpegAudioFrameHeader(const uint8* header,
+ int header_size,
+ int* framesize) {
+ // Reference: http://mpgedit.org/mpgedit/mpeg_format/mpeghdr.htm.
+ DCHECK_GE(header_size, 4);
+ *framesize = 0;
+ BitReader reader(header, 4); // Header can only be 4 bytes long.
+
+ // Verify frame sync (11 bits) are all set.
+ RCHECK(ReadBits(&reader, 11) == 0x7ff);
+
+ // Verify MPEG audio version id.
+ int version = ReadBits(&reader, 2);
+ RCHECK(version != 1); // Reserved.
+
+ // Verify layer.
+ int layer = ReadBits(&reader, 2);
+ RCHECK(layer != 0);
+
+ // Skip protection bit.
+ reader.SkipBits(1);
+
+ // Verify bitrate index.
+ int bitrate_index = ReadBits(&reader, 4);
+ RCHECK(bitrate_index != 0xf);
+
+ // Verify sampling rate frequency index.
+ int sampling_index = ReadBits(&reader, 2);
+ RCHECK(sampling_index != 3);
+
+ // Get padding bit.
+ int padding = ReadBits(&reader, 1);
+
+ // Frame size:
+ // For Layer I files = (12 * BitRate / SampleRate + Padding) * 4
+ // For others = 144 * BitRate / SampleRate + Padding
+ // Unfortunately, BitRate and SampleRate are coded.
+ int sampling_rate = kSampleRateTable[version][sampling_index];
+ int bitrate;
+ if (version == VERSION_1) {
+ if (layer == LAYER_1)
+ bitrate = kBitRateTableV1L1[bitrate_index];
+ else if (layer == LAYER_2)
+ bitrate = kBitRateTableV1L2[bitrate_index];
+ else
+ bitrate = kBitRateTableV1L3[bitrate_index];
+ } else {
+ if (layer == LAYER_1)
+ bitrate = kBitRateTableV2L1[bitrate_index];
+ else
+ bitrate = kBitRateTableV2L23[bitrate_index];
+ }
+ if (layer == LAYER_1)
+ *framesize = ((12000 * bitrate) / sampling_rate + padding) * 4;
+ else
+ *framesize = (144000 * bitrate) / sampling_rate + padding;
+ return (bitrate > 0 && sampling_rate > 0);
+}
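+
+// Worked example (illustrative only): the common header 0xff 0xfb 0x90 0x00
+// parses as sync = 0x7ff, version = 3 (VERSION_1), layer = 1 (LAYER_3),
+// bitrate_index = 9 (128 kbps from kBitRateTableV1L3) and sampling_index = 0
+// (44100 Hz) with no padding, giving a frame size of
+// (144000 * 128) / 44100 = 417 bytes.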
+
+// Extract the size of an ID3v2 tag, whose size field is a syncsafe integer.
+static int GetMp3HeaderSize(const uint8* buffer, int buffer_size) {
+ DCHECK_GE(buffer_size, 9);
+ int size = ((buffer[6] & 0x7f) << 21) + ((buffer[7] & 0x7f) << 14) +
+ ((buffer[8] & 0x7f) << 7) + (buffer[9] & 0x7f) + 10;
+ if (buffer[5] & 0x10) // Footer added?
+ size += 10;
+ return size;
+}
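+
+// Worked example (illustrative only): an ID3v2 header
+//   'I' 'D' '3' 0x04 0x00 0x00 0x00 0x00 0x02 0x01
+// has flag byte 0x00 (no footer) and syncsafe size bytes 00 00 02 01, so
+// GetMp3HeaderSize() returns (2 << 7) + 1 + 10 = 267: a 257-byte tag plus
+// the 10-byte header itself.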
+
+// Additional checks for an MP3 container.
+static bool CheckMp3(const uint8* buffer, int buffer_size, bool seenHeader) {
+ RCHECK(buffer_size >= 10); // Must be enough to read the initial header.
+
+ int framesize;
+ int numSeen = 0;
+ int offset = 0;
+ if (seenHeader) {
+ offset = GetMp3HeaderSize(buffer, buffer_size);
+ } else {
+ // Skip over leading 0's.
+ while (offset < buffer_size && buffer[offset] == 0)
+ ++offset;
+ }
+
+ while (offset + 3 < buffer_size) {
+ RCHECK(ValidMpegAudioFrameHeader(
+ buffer + offset, buffer_size - offset, &framesize));
+
+ // Have we seen enough valid headers?
+ if (++numSeen > 10)
+ return true;
+ offset += framesize;
+ }
+ // Off the end of the buffer, return success if a few valid headers seen.
+ return numSeen > 2;
+}
+
+// Check that the next characters in |buffer| represent a number. The format
+// accepted is optional whitespace followed by 1 or more digits. |max_digits|
+// specifies the maximum number of digits to process. Returns true if a valid
+// number is found, false otherwise.
+static bool VerifyNumber(const uint8* buffer,
+ int buffer_size,
+ int* offset,
+ int max_digits) {
+ RCHECK(*offset < buffer_size);
+
+ // Skip over any leading space.
+ while (isspace(buffer[*offset])) {
+ ++(*offset);
+ RCHECK(*offset < buffer_size);
+ }
+
+ // Need to process up to max_digits digits.
+ int numSeen = 0;
+ while (--max_digits >= 0 && isdigit(buffer[*offset])) {
+ ++numSeen;
+ ++(*offset);
+ if (*offset >= buffer_size)
+ return true; // Out of space but seen a digit.
+ }
+
+ // Success if at least one digit seen.
+ return (numSeen > 0);
+}
+
+// Check that the next character in |buffer| is one of |c1| or |c2|. |c2| is
+// optional. Returns true if there is a match, false if no match or out of
+// space.
+static inline bool VerifyCharacters(const uint8* buffer,
+ int buffer_size,
+ int* offset,
+ char c1,
+ char c2) {
+ RCHECK(*offset < buffer_size);
+ char c = static_cast<char>(buffer[(*offset)++]);
+ return (c == c1 || (c == c2 && c2 != 0));
+}
+
+// Checks for an SRT container.
+static bool CheckSrt(const uint8* buffer, int buffer_size) {
+ // Reference: http://en.wikipedia.org/wiki/SubRip
+ RCHECK(buffer_size > 20);
+
+ // First line should just be the subtitle sequence number.
+ int offset = StartsWith(buffer, buffer_size, UTF8_BYTE_ORDER_MARK) ? 3 : 0;
+ RCHECK(VerifyNumber(buffer, buffer_size, &offset, 100));
+ RCHECK(VerifyCharacters(buffer, buffer_size, &offset, '\n', '\r'));
+
+ // Skip any additional \n\r.
+ while (VerifyCharacters(buffer, buffer_size, &offset, '\n', '\r')) {}
+ --offset; // Since VerifyCharacters() gobbled up the next non-CR/LF.
+
+ // Second line should look like the following:
+ // 00:00:10,500 --> 00:00:13,000
+ // Units separator can be , or .
+ RCHECK(VerifyNumber(buffer, buffer_size, &offset, 100));
+ RCHECK(VerifyCharacters(buffer, buffer_size, &offset, ':', 0));
+ RCHECK(VerifyNumber(buffer, buffer_size, &offset, 2));
+ RCHECK(VerifyCharacters(buffer, buffer_size, &offset, ':', 0));
+ RCHECK(VerifyNumber(buffer, buffer_size, &offset, 2));
+ RCHECK(VerifyCharacters(buffer, buffer_size, &offset, ',', '.'));
+ RCHECK(VerifyNumber(buffer, buffer_size, &offset, 3));
+ RCHECK(VerifyCharacters(buffer, buffer_size, &offset, ' ', 0));
+ RCHECK(VerifyCharacters(buffer, buffer_size, &offset, '-', 0));
+ RCHECK(VerifyCharacters(buffer, buffer_size, &offset, '-', 0));
+ RCHECK(VerifyCharacters(buffer, buffer_size, &offset, '>', 0));
+ RCHECK(VerifyCharacters(buffer, buffer_size, &offset, ' ', 0));
+ RCHECK(VerifyNumber(buffer, buffer_size, &offset, 100));
+ RCHECK(VerifyCharacters(buffer, buffer_size, &offset, ':', 0));
+ RCHECK(VerifyNumber(buffer, buffer_size, &offset, 2));
+ RCHECK(VerifyCharacters(buffer, buffer_size, &offset, ':', 0));
+ RCHECK(VerifyNumber(buffer, buffer_size, &offset, 2));
+ RCHECK(VerifyCharacters(buffer, buffer_size, &offset, ',', '.'));
+ RCHECK(VerifyNumber(buffer, buffer_size, &offset, 3));
+ return true;
+}
+
+// Read a Matroska Element Id.
+static int GetElementId(BitReader* reader) {
+ // Element ID is coded with the leading zero bits (max 3) determining size.
+ // If it is an invalid encoding or the end of the buffer is reached,
+ // return -1 as a tag that won't be expected.
+ if (reader->bits_available() >= 8) {
+ int num_bits_to_read = 0;
+ static int prefix[] = { 0x80, 0x4000, 0x200000, 0x10000000 };
+ for (int i = 0; i < 4; ++i) {
+ num_bits_to_read += 7;
+ if (ReadBits(reader, 1) == 1) {
+ if (reader->bits_available() < num_bits_to_read)
+ break;
+        // prefix[i] restores the marker bit consumed above, yielding the
+        // full on-the-wire ID.
+ return ReadBits(reader, num_bits_to_read) | prefix[i];
+ }
+ }
+ }
+ // Invalid encoding, return something not expected.
+ return -1;
+}
+
+// Read a Matroska Unsigned Integer (VINT).
+static uint64 GetVint(BitReader* reader) {
+ // Values are coded with the leading zero bits (max 7) determining size.
+ // If it is an invalid coding or the end of the buffer is reached,
+ // return something that will go off the end of the buffer.
+ if (reader->bits_available() >= 8) {
+ int num_bits_to_read = 0;
+ for (int i = 0; i < 8; ++i) {
+ num_bits_to_read += 7;
+ if (ReadBits(reader, 1) == 1) {
+ if (reader->bits_available() < num_bits_to_read)
+ break;
+ return ReadBits(reader, num_bits_to_read);
+ }
+ }
+ }
+ // Incorrect format (more than 7 leading 0's) or off the end of the buffer.
+ // Since the return value is used as a byte size, return a value that will
+ // cause a failure when used.
+ return (reader->bits_available() / 8) + 2;
+}
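+
+// Worked example (illustrative only): the EBML magic starts with byte
+// 0x1a = 00011010b. GetElementId() reads three 0 bits and then a 1, so it
+// reads 28 more bits and ORs in prefix[3] = 0x10000000, reconstructing the
+// full ID 0x1a45dfa3. Similarly the VINT byte 0x84 = 10000100b has its
+// marker bit first, so GetVint() returns the low 7 bits, i.e. a size of 4.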
+
+// Additional checks for a WEBM container.
+static bool CheckWebm(const uint8* buffer, int buffer_size) {
+ // Reference: http://www.matroska.org/technical/specs/index.html
+ RCHECK(buffer_size > 12);
+
+ BitReader reader(buffer, buffer_size);
+
+ // Verify starting Element Id.
+ RCHECK(GetElementId(&reader) == 0x1a45dfa3);
+
+ // Get the header size, and ensure there are enough bits to check.
+ int header_size = GetVint(&reader);
+ RCHECK(reader.bits_available() / 8 >= header_size);
+
+ // Loop through the header.
+ while (reader.bits_available() > 0) {
+ int tag = GetElementId(&reader);
+ int tagsize = GetVint(&reader);
+ switch (tag) {
+ case 0x4286: // EBMLVersion
+ case 0x42f7: // EBMLReadVersion
+ case 0x42f2: // EBMLMaxIdLength
+ case 0x42f3: // EBMLMaxSizeLength
+ case 0x4287: // DocTypeVersion
+ case 0x4285: // DocTypeReadVersion
+ case 0xec: // void
+ case 0xbf: // CRC32
+ RCHECK(reader.SkipBits(tagsize * 8));
+ break;
+
+ case 0x4282: // EBMLDocType
+ // Need to see "webm" or "matroska" next.
+ switch (ReadBits(&reader, 32)) {
+ case TAG('w', 'e', 'b', 'm') :
+ return true;
+ case TAG('m', 'a', 't', 'r') :
+ return (ReadBits(&reader, 32) == TAG('o', 's', 'k', 'a'));
+ }
+ return false;
+
+ default: // Unrecognized tag
+ return false;
+ }
+ }
+ return false;
+}
+
+enum VC1StartCodes {
+ VC1_FRAME_START_CODE = 0x0d,
+ VC1_ENTRY_POINT_START_CODE = 0x0e,
+ VC1_SEQUENCE_START_CODE = 0x0f
+};
+
+// Checks for a VC1 bitstream container.
+static bool CheckVC1(const uint8* buffer, int buffer_size) {
+ // Reference: SMPTE 421M
+ // (http://standards.smpte.org/content/978-1-61482-555-5/st-421-2006/SEC1.body.pdf)
+  // However, there is no length field, so simply scan for start code values.
+ // Expect to see SEQ | [ [ ENTRY ] PIC* ]*
+ // Note tags are very similar to H.264.
+
+ RCHECK(buffer_size >= 24);
+
+ // First check for Bitstream Metadata Serialization (Annex L)
+ if (buffer[0] == 0xc5 &&
+ Read32(buffer + 4) == 0x04 &&
+ Read32(buffer + 20) == 0x0c) {
+ // Verify settings in STRUCT_C and STRUCT_A
+ BitReader reader(buffer + 8, 12);
+
+ int profile = ReadBits(&reader, 4);
+ if (profile == 0 || profile == 4) { // simple or main
+ // Skip FRMRTQ_POSTPROC, BITRTQ_POSTPROC, and LOOPFILTER.
+ reader.SkipBits(3 + 5 + 1);
+
+ // Next bit must be 0.
+ RCHECK(ReadBits(&reader, 1) == 0);
+
+ // Skip MULTIRES.
+ reader.SkipBits(1);
+
+ // Next bit must be 1.
+ RCHECK(ReadBits(&reader, 1) == 1);
+
+ // Skip FASTUVMC, EXTENDED_MV, DQUANT, and VSTRANSFORM.
+ reader.SkipBits(1 + 1 + 2 + 1);
+
+ // Next bit must be 0.
+ RCHECK(ReadBits(&reader, 1) == 0);
+
+ // Skip OVERLAP, SYNCMARKER, RANGERED, MAXBFRAMES, QUANTIZER, and
+ // FINTERPFLAG.
+ reader.SkipBits(1 + 1 + 1 + 3 + 2 + 1);
+
+ // Next bit must be 1.
+ RCHECK(ReadBits(&reader, 1) == 1);
+
+ } else {
+ RCHECK(profile == 12); // Other profile values not allowed.
+ RCHECK(ReadBits(&reader, 28) == 0);
+ }
+
+ // Now check HORIZ_SIZE and VERT_SIZE, which must be 8192 or less.
+ RCHECK(ReadBits(&reader, 32) <= 8192);
+ RCHECK(ReadBits(&reader, 32) <= 8192);
+ return true;
+ }
+
+ // Buffer isn't Bitstream Metadata, so scan for start codes.
+ int offset = 0;
+ int sequence_start_code = 0;
+ int frame_start_code = 0;
+ while (true) {
+ // Advance to start_code, if there is one.
+ if (!AdvanceToStartCode(buffer, buffer_size, &offset, 5, 24, 1)) {
+ // Not a complete sequence in memory, so return true if we've seen a
+ // sequence start and a frame start (not checking entry points since
+ // they only occur in advanced profiles).
+ return (sequence_start_code > 0 && frame_start_code > 0);
+ }
+
+ // Now verify the block. AdvanceToStartCode() made sure that there are
+ // at least 5 bytes remaining in the buffer.
+ BitReader reader(buffer + offset, 5);
+ RCHECK(ReadBits(&reader, 24) == 1);
+
+ // Keep track of the number of certain types received.
+ switch (ReadBits(&reader, 8)) {
+ case VC1_SEQUENCE_START_CODE: {
+ ++sequence_start_code;
+ switch (ReadBits(&reader, 2)) {
+ case 0: // simple
+ case 1: // main
+ RCHECK(ReadBits(&reader, 2) == 0);
+ break;
+ case 2: // complex
+ return false;
+ case 3: // advanced
+ RCHECK(ReadBits(&reader, 3) <= 4); // Verify level = 0..4
+ RCHECK(ReadBits(&reader, 2) == 1); // Verify colordiff_format = 1
+ break;
+ }
+ break;
+ }
+
+ case VC1_ENTRY_POINT_START_CODE:
+ // No fields in entry data to check. However, it must occur after
+ // sequence header.
+ RCHECK(sequence_start_code > 0);
+ break;
+
+ case VC1_FRAME_START_CODE:
+ ++frame_start_code;
+ break;
+ }
+ offset += 5;
+ }
+}
+
+// For some formats the signature is a fixed string of characters, defined
+// below. Note that the first 4 characters of the string may be used as a TAG
+// in LookupContainerByFirst4. For signatures that contain embedded \0, use
+// uint8[].
+static const char kAmrSignature[] = "#!AMR";
+static const uint8 kAsfSignature[] = { 0x30, 0x26, 0xb2, 0x75, 0x8e, 0x66, 0xcf,
+ 0x11, 0xa6, 0xd9, 0x00, 0xaa, 0x00, 0x62,
+ 0xce, 0x6c };
+static const char kAssSignature[] = "[Script Info]";
+static const char kAssBomSignature[] = UTF8_BYTE_ORDER_MARK "[Script Info]";
+static const uint8 kWtvSignature[] = { 0xb7, 0xd8, 0x00, 0x20, 0x37, 0x49, 0xda,
+ 0x11, 0xa6, 0x4e, 0x00, 0x07, 0xe9, 0x5e,
+ 0xad, 0x8d };
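+
+// Illustrative sketch (this constant is not used by the detection logic; it
+// assumes TAG(), defined earlier in this file, packs its four characters
+// big-endian, which is what lets the switch below compare against the
+// big-endian Read32()): the first four bytes of an Ogg file, "OggS", read
+// as 0x4f676753.
+static const uint32 kOggSTagExample = TAG('O','g','g','S');  // 0x4f676753.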
+
+// Attempt to determine the container type from the buffer provided. This is
+// a simple pass, that uses the first 4 bytes of the buffer as an index to get
+// a rough idea of the container format.
+static MediaContainerName LookupContainerByFirst4(const uint8* buffer,
+ int buffer_size) {
+  // Minimum buffer size that the checks below assume without re-validating.
+ if (buffer_size < 12)
+ return CONTAINER_UNKNOWN;
+
+ uint32 first4 = Read32(buffer);
+ switch (first4) {
+ case 0x1a45dfa3:
+ if (CheckWebm(buffer, buffer_size))
+ return CONTAINER_WEBM;
+ break;
+
+ case 0x3026b275:
+ if (StartsWith(buffer,
+ buffer_size,
+ kAsfSignature,
+ sizeof(kAsfSignature))) {
+ return CONTAINER_ASF;
+ }
+ break;
+
+ case TAG('#','!','A','M'):
+ if (StartsWith(buffer, buffer_size, kAmrSignature))
+ return CONTAINER_AMR;
+ break;
+
+ case TAG('#','E','X','T'):
+ if (CheckHls(buffer, buffer_size))
+ return CONTAINER_HLS;
+ break;
+
+ case TAG('.','R','M','F'):
+ if (buffer[4] == 0 && buffer[5] == 0)
+ return CONTAINER_RM;
+ break;
+
+ case TAG('.','r','a','\xfd'):
+ return CONTAINER_RM;
+
+ case TAG('B','I','K','b'):
+ case TAG('B','I','K','d'):
+ case TAG('B','I','K','f'):
+ case TAG('B','I','K','g'):
+ case TAG('B','I','K','h'):
+ case TAG('B','I','K','i'):
+ if (CheckBink(buffer, buffer_size))
+ return CONTAINER_BINK;
+ break;
+
+ case TAG('c','a','f','f'):
+ if (CheckCaf(buffer, buffer_size))
+ return CONTAINER_CAF;
+ break;
+
+ case TAG('D','E','X','A'):
+ if (buffer_size > 15 &&
+ Read16(buffer + 11) <= 2048 &&
+ Read16(buffer + 13) <= 2048) {
+ return CONTAINER_DXA;
+ }
+ break;
+
+ case TAG('D','T','S','H'):
+ if (Read32(buffer + 4) == TAG('D','H','D','R'))
+ return CONTAINER_DTSHD;
+ break;
+
+ case 0x64a30100:
+ case 0x64a30200:
+ case 0x64a30300:
+ case 0x64a30400:
+ case 0x0001a364:
+ case 0x0002a364:
+ case 0x0003a364:
+ if (Read32(buffer + 4) != 0 && Read32(buffer + 8) != 0)
+ return CONTAINER_IRCAM;
+ break;
+
+ case TAG('f','L','a','C'):
+ return CONTAINER_FLAC;
+
+ case TAG('F','L','V',0):
+ case TAG('F','L','V',1):
+ case TAG('F','L','V',2):
+ case TAG('F','L','V',3):
+ case TAG('F','L','V',4):
+ if (buffer[5] == 0 && Read32(buffer + 5) > 8)
+ return CONTAINER_FLV;
+ break;
+
+ case TAG('F','O','R','M'):
+ switch (Read32(buffer + 8)) {
+ case TAG('A','I','F','F'):
+ case TAG('A','I','F','C'):
+ return CONTAINER_AIFF;
+ }
+ break;
+
+ case TAG('M','A','C',' '):
+ return CONTAINER_APE;
+
+ case TAG('O','N','2',' '):
+ if (Read32(buffer + 8) == TAG('O','N','2','f'))
+ return CONTAINER_AVI;
+ break;
+
+ case TAG('O','g','g','S'):
+ if (buffer[5] <= 7)
+ return CONTAINER_OGG;
+ break;
+
+ case TAG('R','F','6','4'):
+ if (buffer_size > 16 && Read32(buffer + 12) == TAG('d','s','6','4'))
+ return CONTAINER_WAV;
+ break;
+
+ case TAG('R','I','F','F'):
+ switch (Read32(buffer + 8)) {
+ case TAG('A','V','I',' '):
+ case TAG('A','V','I','X'):
+ case TAG('A','V','I','\x19'):
+ case TAG('A','M','V',' '):
+ return CONTAINER_AVI;
+ case TAG('W','A','V','E'):
+ return CONTAINER_WAV;
+ }
+ break;
+
+ case TAG('[','S','c','r'):
+ if (StartsWith(buffer, buffer_size, kAssSignature))
+ return CONTAINER_ASS;
+ break;
+
+ case TAG('\xef','\xbb','\xbf','['):
+ if (StartsWith(buffer, buffer_size, kAssBomSignature))
+ return CONTAINER_ASS;
+ break;
+
+ case 0x7ffe8001:
+ case 0xfe7f0180:
+ case 0x1fffe800:
+ case 0xff1f00e8:
+ if (CheckDts(buffer, buffer_size))
+ return CONTAINER_DTS;
+ break;
+
+ case 0xb7d80020:
+ if (StartsWith(buffer,
+ buffer_size,
+ kWtvSignature,
+ sizeof(kWtvSignature))) {
+ return CONTAINER_WTV;
+ }
+ break;
+ }
+
+ // Now try a few different ones that look at something other
+ // than the first 4 bytes.
+ uint32 first3 = first4 & 0xffffff00;
+ switch (first3) {
+ case TAG('C','W','S',0):
+ case TAG('F','W','S',0):
+ return CONTAINER_SWF;
+
+ case TAG('I','D','3',0):
+ if (CheckMp3(buffer, buffer_size, true))
+ return CONTAINER_MP3;
+ break;
+ }
+
+  // Maybe the first 2 bytes are something we can use.
+ uint32 first2 = Read16(buffer);
+ switch (first2) {
+ case kAc3SyncWord:
+ if (CheckAc3(buffer, buffer_size))
+ return CONTAINER_AC3;
+ if (CheckEac3(buffer, buffer_size))
+ return CONTAINER_EAC3;
+ break;
+
+ case 0xfff0:
+ case 0xfff1:
+ case 0xfff8:
+ case 0xfff9:
+ if (CheckAac(buffer, buffer_size))
+ return CONTAINER_AAC;
+ break;
+ }
+
+ // Check if the file is in MP3 format without the header.
+ if (CheckMp3(buffer, buffer_size, false))
+ return CONTAINER_MP3;
+
+ return CONTAINER_UNKNOWN;
+}
+
+// Attempt to determine the container name from the buffer provided.
+MediaContainerName DetermineContainer(const uint8* buffer, int buffer_size) {
+ DCHECK(buffer);
+
+ // Since MOV/QuickTime/MPEG4 streams are common, check for them first.
+ if (CheckMov(buffer, buffer_size))
+ return CONTAINER_MOV;
+
+ // Next attempt the simple checks, that typically look at just the
+ // first few bytes of the file.
+ MediaContainerName result = LookupContainerByFirst4(buffer, buffer_size);
+ if (result != CONTAINER_UNKNOWN)
+ return result;
+
+ // Additional checks that may scan a portion of the buffer.
+ if (CheckMpeg2ProgramStream(buffer, buffer_size))
+ return CONTAINER_MPEG2PS;
+ if (CheckMpeg2TransportStream(buffer, buffer_size))
+ return CONTAINER_MPEG2TS;
+ if (CheckMJpeg(buffer, buffer_size))
+ return CONTAINER_MJPEG;
+ if (CheckDV(buffer, buffer_size))
+ return CONTAINER_DV;
+ if (CheckH261(buffer, buffer_size))
+ return CONTAINER_H261;
+ if (CheckH263(buffer, buffer_size))
+ return CONTAINER_H263;
+ if (CheckH264(buffer, buffer_size))
+ return CONTAINER_H264;
+ if (CheckMpeg4BitStream(buffer, buffer_size))
+ return CONTAINER_MPEG4BS;
+ if (CheckVC1(buffer, buffer_size))
+ return CONTAINER_VC1;
+ if (CheckSrt(buffer, buffer_size))
+ return CONTAINER_SRT;
+ if (CheckGsm(buffer, buffer_size))
+ return CONTAINER_GSM;
+
+ // AC3/EAC3 might not start at the beginning of the stream,
+ // so scan for a start code.
+ int offset = 1; // No need to start at byte 0 due to First4 check.
+ if (AdvanceToStartCode(buffer, buffer_size, &offset, 4, 16, kAc3SyncWord)) {
+ if (CheckAc3(buffer + offset, buffer_size - offset))
+ return CONTAINER_AC3;
+ if (CheckEac3(buffer + offset, buffer_size - offset))
+ return CONTAINER_EAC3;
+ }
+
+ return CONTAINER_UNKNOWN;
+}
+
+} // namespace container_names
+
+} // namespace media
diff --git a/chromium/media/base/container_names.h b/chromium/media/base/container_names.h
new file mode 100644
index 00000000000..7b7b099a00a
--- /dev/null
+++ b/chromium/media/base/container_names.h
@@ -0,0 +1,70 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_CONTAINER_NAMES_H_
+#define MEDIA_BASE_CONTAINER_NAMES_H_
+
+#include "base/basictypes.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+namespace container_names {
+
+// This is the set of input container formats detected for logging purposes. Not
+// all of these are enabled (and it varies by product). Any additions need to be
+// done at the end of the list (before CONTAINER_MAX). This list must be kept in
+// sync with the enum definition "MediaContainers" in
+// tools/metrics/histograms/histograms.xml.
+enum MediaContainerName {
+ CONTAINER_UNKNOWN, // Unknown
+ CONTAINER_AAC, // AAC (Advanced Audio Coding)
+ CONTAINER_AC3, // AC-3
+ CONTAINER_AIFF, // AIFF (Audio Interchange File Format)
+ CONTAINER_AMR, // AMR (Adaptive Multi-Rate Audio)
+ CONTAINER_APE, // APE (Monkey's Audio)
+ CONTAINER_ASF, // ASF (Advanced / Active Streaming Format)
+ CONTAINER_ASS, // SSA (SubStation Alpha) subtitle
+ CONTAINER_AVI, // AVI (Audio Video Interleaved)
+ CONTAINER_BINK, // Bink
+ CONTAINER_CAF, // CAF (Apple Core Audio Format)
+ CONTAINER_DTS, // DTS
+ CONTAINER_DTSHD, // DTS-HD
+ CONTAINER_DV, // DV (Digital Video)
+ CONTAINER_DXA, // DXA
+ CONTAINER_EAC3, // Enhanced AC-3
+ CONTAINER_FLAC, // FLAC (Free Lossless Audio Codec)
+ CONTAINER_FLV, // FLV (Flash Video)
+ CONTAINER_GSM, // GSM (Global System for Mobile Audio)
+ CONTAINER_H261, // H.261
+ CONTAINER_H263, // H.263
+ CONTAINER_H264, // H.264
+ CONTAINER_HLS, // HLS (Apple HTTP Live Streaming PlayList)
+ CONTAINER_IRCAM, // Berkeley/IRCAM/CARL Sound Format
+ CONTAINER_MJPEG, // MJPEG video
+ CONTAINER_MOV, // QuickTime / MOV / MPEG4
+ CONTAINER_MP3, // MP3 (MPEG audio layer 2/3)
+ CONTAINER_MPEG2PS, // MPEG-2 Program Stream
+ CONTAINER_MPEG2TS, // MPEG-2 Transport Stream
+ CONTAINER_MPEG4BS, // MPEG-4 Bitstream
+ CONTAINER_OGG, // Ogg
+ CONTAINER_RM, // RM (RealMedia)
+ CONTAINER_SRT, // SRT (SubRip subtitle)
+ CONTAINER_SWF, // SWF (ShockWave Flash)
+ CONTAINER_VC1, // VC-1
+ CONTAINER_WAV, // WAV / WAVE (Waveform Audio)
+ CONTAINER_WEBM, // Matroska / WebM
+ CONTAINER_WTV, // WTV (Windows Television)
+ CONTAINER_MAX // Must be last
+};
+
+// Determine the container type.
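+//
+// Example use (an illustrative sketch; ReadFirstBytesOfFile is a
+// hypothetical helper, and the 8192-byte read size simply mirrors the unit
+// tests rather than being mandated by the API):
+//   uint8 buffer[8192];
+//   int bytes_read = ReadFirstBytesOfFile(path, buffer, sizeof(buffer));
+//   MediaContainerName container = DetermineContainer(buffer, bytes_read);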
+MEDIA_EXPORT MediaContainerName DetermineContainer(const uint8* buffer,
+ int buffer_size);
+
+} // namespace container_names
+
+} // namespace media
+
+#endif // MEDIA_BASE_CONTAINER_NAMES_H_
diff --git a/chromium/media/base/container_names_unittest.cc b/chromium/media/base/container_names_unittest.cc
new file mode 100644
index 00000000000..21f80af6d98
--- /dev/null
+++ b/chromium/media/base/container_names_unittest.cc
@@ -0,0 +1,220 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/file_util.h"
+#include "media/base/container_names.h"
+#include "media/base/test_data_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+namespace container_names {
+
+// Using a macro to simplify tests. Since EXPECT_EQ outputs the second argument
+// as a string when it fails, this lets the output identify what item actually
+// failed.
+#define VERIFY(buffer, name) \
+ EXPECT_EQ(name, \
+ DetermineContainer(reinterpret_cast<const uint8*>(buffer), \
+ sizeof(buffer)))
+
+// Test that small buffers are handled correctly.
+TEST(ContainerNamesTest, CheckSmallBuffer) {
+ // Empty buffer.
+ char buffer[1]; // ([0] not allowed on win)
+ VERIFY(buffer, CONTAINER_UNKNOWN);
+
+ // Try a simple SRT file.
+ char buffer1[] =
+ "1\n"
+ "00:03:23,550 --> 00:03:24,375\n"
+ "You always had a hard time finding your place in this world.\n"
+ "\n"
+ "2\n"
+ "00:03:24,476 --> 00:03:25,175\n"
+ "What are you talking about?\n";
+ VERIFY(buffer1, CONTAINER_SRT);
+
+  // HLS has its own loop.
+ char buffer2[] = "#EXTM3U"
+ "some other random stuff"
+ "#EXT-X-MEDIA-SEQUENCE:";
+ VERIFY(buffer2, CONTAINER_HLS);
+
+ // Try a large buffer all zeros.
+ char buffer3[4096];
+ memset(buffer3, 0, sizeof(buffer3));
+ VERIFY(buffer3, CONTAINER_UNKNOWN);
+
+ // Reuse buffer, but all \n this time.
+ memset(buffer3, '\n', sizeof(buffer3));
+ VERIFY(buffer3, CONTAINER_UNKNOWN);
+}
+
+#define BYTE_ORDER_MARK "\xef\xbb\xbf"
+
+// Note that the comparisons need at least 12 bytes, so make sure the buffer is
+// at least that size.
+const char kAmrBuffer[12] = "#!AMR";
+uint8 kAsfBuffer[] = { 0x30, 0x26, 0xb2, 0x75, 0x8e, 0x66, 0xcf, 0x11, 0xa6,
+ 0xd9, 0x00, 0xaa, 0x00, 0x62, 0xce, 0x6c };
+const char kAss1Buffer[] = "[Script Info]";
+const char kAss2Buffer[] = BYTE_ORDER_MARK "[Script Info]";
+uint8 kCafBuffer[] = { 'c', 'a', 'f', 'f', 0, 1, 0, 0, 'd', 'e', 's', 'c', 0, 0,
+ 0, 0, 0, 0, 0, 32, 64, 229, 136, 128, 0, 0, 0, 0, 'a',
+ 'a', 'c', ' ', 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0,
+ 0, 2, 0, 0, 0, 0 };
+const char kDtshdBuffer[12] = "DTSHDHDR";
+const char kDxaBuffer[16] = "DEXA";
+const char kFlacBuffer[12] = "fLaC";
+uint8 kFlvBuffer[12] = { 'F', 'L', 'V', 0, 0, 0, 0, 1, 0, 0, 0, 0 };
+uint8 kIrcamBuffer[] = { 0x64, 0xa3, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1 };
+const char kRm1Buffer[12] = ".RMF\0\0";
+const char kRm2Buffer[12] = ".ra\xfd";
+uint8 kWtvBuffer[] = { 0xb7, 0xd8, 0x00, 0x20, 0x37, 0x49, 0xda, 0x11, 0xa6,
+ 0x4e, 0x00, 0x07, 0xe9, 0x5e, 0xad, 0x8d };
+uint8 kBug263073Buffer[] = {
+ 0x00, 0x00, 0x00, 0x18, 0x66, 0x74, 0x79, 0x70, 0x6d, 0x70, 0x34, 0x32,
+ 0x00, 0x00, 0x00, 0x00, 0x69, 0x73, 0x6f, 0x6d, 0x6d, 0x70, 0x34, 0x32,
+ 0x00, 0x00, 0x00, 0x01, 0x6d, 0x64, 0x61, 0x74, 0x00, 0x00, 0x00, 0x00,
+ 0xaa, 0x2e, 0x22, 0xcf, 0x00, 0x00, 0x00, 0x37, 0x67, 0x64, 0x00, 0x28,
+ 0xac, 0x2c, 0xa4, 0x01, 0xe0, 0x08, 0x9f, 0x97, 0x01, 0x52, 0x02, 0x02,
+ 0x02, 0x80, 0x00, 0x01};
+
+// Test that containers that start with fixed strings are handled correctly.
+// This is to verify that the TAG matches the first 4 characters of the string.
+TEST(ContainerNamesTest, CheckFixedStrings) {
+ VERIFY(kAmrBuffer, CONTAINER_AMR);
+ VERIFY(kAsfBuffer, CONTAINER_ASF);
+ VERIFY(kAss1Buffer, CONTAINER_ASS);
+ VERIFY(kAss2Buffer, CONTAINER_ASS);
+ VERIFY(kCafBuffer, CONTAINER_CAF);
+ VERIFY(kDtshdBuffer, CONTAINER_DTSHD);
+ VERIFY(kDxaBuffer, CONTAINER_DXA);
+ VERIFY(kFlacBuffer, CONTAINER_FLAC);
+ VERIFY(kFlvBuffer, CONTAINER_FLV);
+ VERIFY(kIrcamBuffer, CONTAINER_IRCAM);
+ VERIFY(kRm1Buffer, CONTAINER_RM);
+ VERIFY(kRm2Buffer, CONTAINER_RM);
+ VERIFY(kWtvBuffer, CONTAINER_WTV);
+ VERIFY(kBug263073Buffer, CONTAINER_MOV);
+}
+
+// Determine the container type of a specified file.
+void TestFile(MediaContainerName expected, const base::FilePath& filename) {
+ char buffer[8192];
+
+  // The Windows implementation of ReadFile fails if the file is smaller than
+  // the desired size, so use the file length if the file is less than 8192
+  // bytes (http://crbug.com/243885).
+ int read_size = sizeof(buffer);
+ int64 actual_size;
+ if (file_util::GetFileSize(filename, &actual_size) && actual_size < read_size)
+ read_size = actual_size;
+ int read = file_util::ReadFile(filename, buffer, read_size);
+
+ // Now verify the type.
+ EXPECT_EQ(expected,
+ DetermineContainer(reinterpret_cast<const uint8*>(buffer), read))
+ << "Failure with file " << filename.value();
+}
+
+TEST(ContainerNamesTest, FileCheckOGG) {
+ TestFile(CONTAINER_OGG, GetTestDataFilePath("bear.ogv"));
+ TestFile(CONTAINER_OGG, GetTestDataFilePath("9ch.ogg"));
+}
+
+TEST(ContainerNamesTest, FileCheckWAV) {
+ TestFile(CONTAINER_WAV, GetTestDataFilePath("4ch.wav"));
+ TestFile(CONTAINER_WAV, GetTestDataFilePath("sfx_f32le.wav"));
+ TestFile(CONTAINER_WAV, GetTestDataFilePath("sfx_s16le.wav"));
+}
+
+TEST(ContainerNamesTest, FileCheckMOV) {
+ TestFile(CONTAINER_MOV, GetTestDataFilePath("bear-1280x720.mp4"));
+ TestFile(CONTAINER_MOV, GetTestDataFilePath("sfx.m4a"));
+}
+
+TEST(ContainerNamesTest, FileCheckWEBM) {
+ TestFile(CONTAINER_WEBM, GetTestDataFilePath("bear-320x240.webm"));
+ TestFile(CONTAINER_WEBM, GetTestDataFilePath("no_streams.webm"));
+ TestFile(CONTAINER_WEBM, GetTestDataFilePath("webm_ebml_element"));
+}
+
+TEST(ContainerNamesTest, FileCheckMP3) {
+ TestFile(CONTAINER_MP3, GetTestDataFilePath("id3_test.mp3"));
+ TestFile(CONTAINER_MP3, GetTestDataFilePath("sfx.mp3"));
+}
+
+TEST(ContainerNamesTest, FileCheckAC3) {
+ TestFile(CONTAINER_AC3, GetTestDataFilePath("bear.ac3"));
+}
+
+TEST(ContainerNamesTest, FileCheckAAC) {
+ TestFile(CONTAINER_AAC, GetTestDataFilePath("bear.adts"));
+}
+
+TEST(ContainerNamesTest, FileCheckAIFF) {
+ TestFile(CONTAINER_AIFF, GetTestDataFilePath("bear.aiff"));
+}
+
+TEST(ContainerNamesTest, FileCheckASF) {
+ TestFile(CONTAINER_ASF, GetTestDataFilePath("bear.asf"));
+}
+
+TEST(ContainerNamesTest, FileCheckAVI) {
+ TestFile(CONTAINER_AVI, GetTestDataFilePath("bear.avi"));
+}
+
+TEST(ContainerNamesTest, FileCheckEAC3) {
+ TestFile(CONTAINER_EAC3, GetTestDataFilePath("bear.eac3"));
+}
+
+TEST(ContainerNamesTest, FileCheckFLAC) {
+ TestFile(CONTAINER_FLAC, GetTestDataFilePath("bear.flac"));
+}
+
+TEST(ContainerNamesTest, FileCheckFLV) {
+ TestFile(CONTAINER_FLV, GetTestDataFilePath("bear.flv"));
+}
+
+TEST(ContainerNamesTest, FileCheckH261) {
+ TestFile(CONTAINER_H261, GetTestDataFilePath("bear.h261"));
+}
+
+TEST(ContainerNamesTest, FileCheckH263) {
+ TestFile(CONTAINER_H263, GetTestDataFilePath("bear.h263"));
+}
+
+TEST(ContainerNamesTest, FileCheckMJPEG) {
+ TestFile(CONTAINER_MJPEG, GetTestDataFilePath("bear.mjpeg"));
+}
+
+TEST(ContainerNamesTest, FileCheckMPEG2PS) {
+ TestFile(CONTAINER_MPEG2PS, GetTestDataFilePath("bear.mpeg"));
+}
+
+TEST(ContainerNamesTest, FileCheckMPEG2TS) {
+ TestFile(CONTAINER_MPEG2TS, GetTestDataFilePath("bear.m2ts"));
+}
+
+TEST(ContainerNamesTest, FileCheckRM) {
+ TestFile(CONTAINER_RM, GetTestDataFilePath("bear.rm"));
+}
+
+TEST(ContainerNamesTest, FileCheckSWF) {
+ TestFile(CONTAINER_SWF, GetTestDataFilePath("bear.swf"));
+}
+
+// Try a few non containers.
+TEST(ContainerNamesTest, FileCheckUNKNOWN) {
+ TestFile(CONTAINER_UNKNOWN, GetTestDataFilePath("ten_byte_file"));
+ TestFile(CONTAINER_UNKNOWN, GetTestDataFilePath("README"));
+ TestFile(CONTAINER_UNKNOWN, GetTestDataFilePath("bali_640x360_P422.yuv"));
+ TestFile(CONTAINER_UNKNOWN, GetTestDataFilePath("bali_640x360_RGB24.rgb"));
+ TestFile(CONTAINER_UNKNOWN, GetTestDataFilePath("webm_vp8_track_entry"));
+}
+
+} // namespace container_names
+
+} // namespace media
diff --git a/chromium/media/base/data_buffer.cc b/chromium/media/base/data_buffer.cc
new file mode 100644
index 00000000000..d0b40eed1a5
--- /dev/null
+++ b/chromium/media/base/data_buffer.cc
@@ -0,0 +1,52 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/data_buffer.h"
+
+#include "base/logging.h"
+
+namespace media {
+
+DataBuffer::DataBuffer(int buffer_size)
+ : buffer_size_(buffer_size),
+ data_size_(0) {
+ CHECK_GE(buffer_size, 0);
+ data_.reset(new uint8[buffer_size_]);
+}
+
+DataBuffer::DataBuffer(scoped_ptr<uint8[]> buffer, int buffer_size)
+ : data_(buffer.Pass()),
+ buffer_size_(buffer_size),
+ data_size_(buffer_size) {
+ CHECK(data_.get());
+ CHECK_GE(buffer_size, 0);
+}
+
+DataBuffer::DataBuffer(const uint8* data, int data_size)
+ : buffer_size_(data_size),
+ data_size_(data_size) {
+ if (!data) {
+ CHECK_EQ(data_size, 0);
+ return;
+ }
+
+ CHECK_GE(data_size, 0);
+ data_.reset(new uint8[buffer_size_]);
+ memcpy(data_.get(), data, data_size_);
+}
+
+DataBuffer::~DataBuffer() {}
+
+// static
+scoped_refptr<DataBuffer> DataBuffer::CopyFrom(const uint8* data, int size) {
+ // If you hit this CHECK you likely have a bug in a demuxer. Go fix it.
+ CHECK(data);
+ return make_scoped_refptr(new DataBuffer(data, size));
+}
+
+// static
+scoped_refptr<DataBuffer> DataBuffer::CreateEOSBuffer() {
+ return make_scoped_refptr(new DataBuffer(NULL, 0));
+}
+
+}  // namespace media
diff --git a/chromium/media/base/data_buffer.h b/chromium/media/base/data_buffer.h
new file mode 100644
index 00000000000..2e88faf57c9
--- /dev/null
+++ b/chromium/media/base/data_buffer.h
@@ -0,0 +1,113 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_DATA_BUFFER_H_
+#define MEDIA_BASE_DATA_BUFFER_H_
+
+#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/time/time.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+// A simple buffer that takes ownership of the given data pointer or allocates
+// as necessary.
+//
+// Unlike DecoderBuffer, allocations are assumed to be allocated with the
+// default memory allocator (i.e., new uint8[]).
+//
+// NOTE: It is illegal to call any method when end_of_stream() is true.
+class MEDIA_EXPORT DataBuffer : public base::RefCountedThreadSafe<DataBuffer> {
+ public:
+ // Allocates buffer of size |buffer_size| >= 0.
+ explicit DataBuffer(int buffer_size);
+
+ // Assumes valid data of size |buffer_size|.
+ DataBuffer(scoped_ptr<uint8[]> buffer, int buffer_size);
+
+ // Create a DataBuffer whose |data_| is copied from |data|.
+ //
+ // |data| must not be null and |size| must be >= 0.
+ static scoped_refptr<DataBuffer> CopyFrom(const uint8* data, int size);
+
+ // Create a DataBuffer indicating we've reached end of stream.
+ //
+ // Calling any method other than end_of_stream() on the resulting buffer
+ // is disallowed.
+ static scoped_refptr<DataBuffer> CreateEOSBuffer();
+
+ base::TimeDelta timestamp() const {
+ DCHECK(!end_of_stream());
+ return timestamp_;
+ }
+
+ void set_timestamp(const base::TimeDelta& timestamp) {
+ DCHECK(!end_of_stream());
+ timestamp_ = timestamp;
+ }
+
+ base::TimeDelta duration() const {
+ DCHECK(!end_of_stream());
+ return duration_;
+ }
+
+ void set_duration(const base::TimeDelta& duration) {
+ DCHECK(!end_of_stream());
+ duration_ = duration;
+ }
+
+ const uint8* data() const {
+ DCHECK(!end_of_stream());
+ return data_.get();
+ }
+
+ uint8* writable_data() {
+ DCHECK(!end_of_stream());
+ return data_.get();
+ }
+
+ // The size of valid data in bytes.
+ //
+ // Setting this value beyond the buffer size is disallowed.
+ int data_size() const {
+ DCHECK(!end_of_stream());
+ return data_size_;
+ }
+
+ void set_data_size(int data_size) {
+ DCHECK(!end_of_stream());
+ CHECK_LE(data_size, buffer_size_);
+ data_size_ = data_size;
+ }
+
+ // If there's no data in this buffer, it represents end of stream.
+ bool end_of_stream() const { return data_ == NULL; }
+
+ protected:
+ friend class base::RefCountedThreadSafe<DataBuffer>;
+
+ // Allocates buffer of size |data_size|, copies [data,data+data_size) to
+ // the allocated buffer and sets data size to |data_size|.
+ //
+ // If |data| is null an end of stream buffer is created.
+ DataBuffer(const uint8* data, int data_size);
+
+ virtual ~DataBuffer();
+
+ private:
+ base::TimeDelta timestamp_;
+ base::TimeDelta duration_;
+
+ scoped_ptr<uint8[]> data_;
+ int buffer_size_;
+ int data_size_;
+
+ DISALLOW_COPY_AND_ASSIGN(DataBuffer);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_DATA_BUFFER_H_
diff --git a/chromium/media/base/data_buffer_unittest.cc b/chromium/media/base/data_buffer_unittest.cc
new file mode 100644
index 00000000000..a97ba7dca5c
--- /dev/null
+++ b/chromium/media/base/data_buffer_unittest.cc
@@ -0,0 +1,124 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/string_util.h"
+#include "media/base/data_buffer.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+TEST(DataBufferTest, Constructor_ZeroSize) {
+  // Zero-sized buffers are valid. In practice they aren't used very much, but
+  // supporting them saves clients from worrying about null data pointers.
+ scoped_refptr<DataBuffer> buffer = new DataBuffer(0);
+ EXPECT_TRUE(buffer->data());
+ EXPECT_TRUE(buffer->writable_data());
+ EXPECT_EQ(0, buffer->data_size());
+ EXPECT_FALSE(buffer->end_of_stream());
+}
+
+TEST(DataBufferTest, Constructor_NonZeroSize) {
+ // Buffer size should be set.
+ scoped_refptr<DataBuffer> buffer = new DataBuffer(10);
+ EXPECT_TRUE(buffer->data());
+ EXPECT_TRUE(buffer->writable_data());
+ EXPECT_EQ(0, buffer->data_size());
+ EXPECT_FALSE(buffer->end_of_stream());
+}
+
+TEST(DataBufferTest, Constructor_ScopedArray) {
+ // Data should be passed and both data and buffer size should be set.
+ const int kSize = 8;
+ scoped_ptr<uint8[]> data(new uint8[kSize]);
+ const uint8* kData = data.get();
+
+ scoped_refptr<DataBuffer> buffer = new DataBuffer(data.Pass(), kSize);
+ EXPECT_TRUE(buffer->data());
+ EXPECT_TRUE(buffer->writable_data());
+ EXPECT_EQ(kData, buffer->data());
+ EXPECT_EQ(kSize, buffer->data_size());
+ EXPECT_FALSE(buffer->end_of_stream());
+}
+
+TEST(DataBufferTest, CopyFrom) {
+ const uint8 kTestData[] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77 };
+ const int kTestDataSize = arraysize(kTestData);
+
+ scoped_refptr<DataBuffer> buffer =
+ DataBuffer::CopyFrom(kTestData, kTestDataSize);
+ EXPECT_EQ(kTestDataSize, buffer->data_size());
+ EXPECT_FALSE(buffer->end_of_stream());
+
+ // Ensure we are copying the data, not just pointing to the original data.
+ EXPECT_EQ(0, memcmp(buffer->data(), kTestData, kTestDataSize));
+ buffer->writable_data()[0] = 0xFF;
+ EXPECT_NE(0, memcmp(buffer->data(), kTestData, kTestDataSize));
+}
+
+TEST(DataBufferTest, CreateEOSBuffer) {
+ scoped_refptr<DataBuffer> buffer = DataBuffer::CreateEOSBuffer();
+ EXPECT_TRUE(buffer->end_of_stream());
+}
+
+TEST(DataBufferTest, Timestamp) {
+ const base::TimeDelta kZero;
+ const base::TimeDelta kTimestampA = base::TimeDelta::FromMicroseconds(1337);
+ const base::TimeDelta kTimestampB = base::TimeDelta::FromMicroseconds(1234);
+
+ scoped_refptr<DataBuffer> buffer = new DataBuffer(0);
+ EXPECT_TRUE(buffer->timestamp() == kZero);
+
+ buffer->set_timestamp(kTimestampA);
+ EXPECT_TRUE(buffer->timestamp() == kTimestampA);
+
+ buffer->set_timestamp(kTimestampB);
+ EXPECT_TRUE(buffer->timestamp() == kTimestampB);
+}
+
+TEST(DataBufferTest, Duration) {
+ const base::TimeDelta kZero;
+ const base::TimeDelta kDurationA = base::TimeDelta::FromMicroseconds(1337);
+ const base::TimeDelta kDurationB = base::TimeDelta::FromMicroseconds(1234);
+
+ scoped_refptr<DataBuffer> buffer = new DataBuffer(0);
+ EXPECT_TRUE(buffer->duration() == kZero);
+
+ buffer->set_duration(kDurationA);
+ EXPECT_TRUE(buffer->duration() == kDurationA);
+
+ buffer->set_duration(kDurationB);
+ EXPECT_TRUE(buffer->duration() == kDurationB);
+}
+
+TEST(DataBufferTest, ReadingWriting) {
+ const char kData[] = "hello";
+ const int kDataSize = arraysize(kData);
+ const char kNewData[] = "chromium";
+ const int kNewDataSize = arraysize(kNewData);
+
+ // Create a DataBuffer.
+ scoped_refptr<DataBuffer> buffer(new DataBuffer(kDataSize));
+ ASSERT_TRUE(buffer.get());
+
+ uint8* data = buffer->writable_data();
+ ASSERT_TRUE(data);
+ memcpy(data, kData, kDataSize);
+ buffer->set_data_size(kDataSize);
+ const uint8* read_only_data = buffer->data();
+ ASSERT_EQ(data, read_only_data);
+ ASSERT_EQ(0, memcmp(read_only_data, kData, kDataSize));
+ EXPECT_FALSE(buffer->end_of_stream());
+
+ scoped_refptr<DataBuffer> buffer2(new DataBuffer(kNewDataSize + 10));
+ data = buffer2->writable_data();
+ ASSERT_TRUE(data);
+ memcpy(data, kNewData, kNewDataSize);
+ buffer2->set_data_size(kNewDataSize);
+ read_only_data = buffer2->data();
+ EXPECT_EQ(kNewDataSize, buffer2->data_size());
+ ASSERT_EQ(data, read_only_data);
+ EXPECT_EQ(0, memcmp(read_only_data, kNewData, kNewDataSize));
+}
+
+} // namespace media
diff --git a/chromium/media/base/data_source.cc b/chromium/media/base/data_source.cc
new file mode 100644
index 00000000000..c25f9e73d62
--- /dev/null
+++ b/chromium/media/base/data_source.cc
@@ -0,0 +1,30 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/data_source.h"
+
+#include "base/logging.h"
+
+namespace media {
+
+// static
+const int DataSource::kReadError = -1;
+
+DataSourceHost::~DataSourceHost() {}
+
+DataSource::DataSource() : host_(NULL) {}
+
+DataSource::~DataSource() {}
+
+void DataSource::set_host(DataSourceHost* host) {
+ DCHECK(host);
+ DCHECK(!host_);
+ host_ = host;
+}
+
+void DataSource::SetPlaybackRate(float playback_rate) {}
+
+DataSourceHost* DataSource::host() { return host_; }
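+
+// Illustrative sketch (not part of the original interface): a minimal
+// in-memory DataSource showing how the pure virtual methods are typically
+// satisfied. The class name and members are hypothetical, the usual Chromium
+// integer typedefs are assumed to be in scope, and a real implementation
+// would also handle threading and cancel pending reads in Stop().
+class MemoryDataSource : public DataSource {
+ public:
+  MemoryDataSource(const uint8* data, int64 size)
+      : data_(data), size_(size) {}
+
+  virtual void Read(int64 position, int size, uint8* data,
+                    const DataSource::ReadCB& read_cb) {
+    if (position < 0 || position > size_) {
+      read_cb.Run(kReadError);
+      return;
+    }
+    // Clamp the read to the end of the buffer and copy.
+    int bytes = size;
+    if (position + bytes > size_)
+      bytes = static_cast<int>(size_ - position);
+    for (int i = 0; i < bytes; ++i)
+      data[i] = data_[position + i];
+    read_cb.Run(bytes);
+  }
+
+  virtual void Stop(const base::Closure& callback) { callback.Run(); }
+
+  virtual bool GetSize(int64* size_out) {
+    *size_out = size_;
+    return true;
+  }
+
+  virtual bool IsStreaming() { return false; }
+
+  virtual void SetBitrate(int bitrate) {}
+
+ private:
+  const uint8* data_;
+  int64 size_;
+};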
+
+} // namespace media
diff --git a/chromium/media/base/data_source.h b/chromium/media/base/data_source.h
new file mode 100644
index 00000000000..def1d01f314
--- /dev/null
+++ b/chromium/media/base/data_source.h
@@ -0,0 +1,79 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_DATA_SOURCE_H_
+#define MEDIA_BASE_DATA_SOURCE_H_
+
+#include "base/callback.h"
+#include "base/time/time.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+class MEDIA_EXPORT DataSourceHost {
+ public:
+ // Set the total size of the media file.
+ virtual void SetTotalBytes(int64 total_bytes) = 0;
+
+ // Notify the host that byte range [start,end] has been buffered.
+ // TODO(fischman): remove this method when demuxing is push-based instead of
+ // pull-based. http://crbug.com/131444
+ virtual void AddBufferedByteRange(int64 start, int64 end) = 0;
+
+ // Notify the host that time range [start,end] has been buffered.
+ virtual void AddBufferedTimeRange(base::TimeDelta start,
+ base::TimeDelta end) = 0;
+
+ protected:
+ virtual ~DataSourceHost();
+};
+
+class MEDIA_EXPORT DataSource {
+ public:
+ typedef base::Callback<void(int64, int64)> StatusCallback;
+ typedef base::Callback<void(int)> ReadCB;
+ static const int kReadError;
+
+ DataSource();
+ virtual ~DataSource();
+
+ virtual void set_host(DataSourceHost* host);
+
+  // Reads |size| bytes from |position| into |data|. When the read completes
+  // or fails, |read_cb| is called with the number of bytes read, or
+  // kReadError on error.
+ virtual void Read(int64 position, int size, uint8* data,
+ const DataSource::ReadCB& read_cb) = 0;
+
+ // Notifies the DataSource of a change in the current playback rate.
+ virtual void SetPlaybackRate(float playback_rate);
+
+ // Stops the DataSource. Once this is called all future Read() calls will
+ // return an error.
+ virtual void Stop(const base::Closure& callback) = 0;
+
+  // Returns true and sets |size_out| to the file size; returns false if the
+  // file size could not be retrieved.
+ virtual bool GetSize(int64* size_out) = 0;
+
+ // Returns true if we are performing streaming. In this case seeking is
+ // not possible.
+ virtual bool IsStreaming() = 0;
+
+ // Notify the DataSource of the bitrate of the media.
+ // Values of |bitrate| <= 0 are invalid and should be ignored.
+ virtual void SetBitrate(int bitrate) = 0;
+
+ protected:
+ DataSourceHost* host();
+
+ private:
+ DataSourceHost* host_;
+
+ DISALLOW_COPY_AND_ASSIGN(DataSource);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_DATA_SOURCE_H_
diff --git a/chromium/media/base/decoder_buffer.cc b/chromium/media/base/decoder_buffer.cc
new file mode 100644
index 00000000000..9eaa128ceb2
--- /dev/null
+++ b/chromium/media/base/decoder_buffer.cc
@@ -0,0 +1,87 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/decoder_buffer.h"
+
+#include "base/logging.h"
+#include "media/base/decrypt_config.h"
+
+namespace media {
+
+DecoderBuffer::DecoderBuffer(int size)
+ : size_(size),
+ side_data_size_(0) {
+ Initialize();
+}
+
+DecoderBuffer::DecoderBuffer(const uint8* data, int size,
+ const uint8* side_data, int side_data_size)
+ : size_(size),
+ side_data_size_(side_data_size) {
+ if (!data) {
+ CHECK_EQ(size_, 0);
+ CHECK(!side_data);
+ return;
+ }
+
+ Initialize();
+ memcpy(data_.get(), data, size_);
+ if (side_data)
+ memcpy(side_data_.get(), side_data, side_data_size_);
+}
+
+DecoderBuffer::~DecoderBuffer() {}
+
+void DecoderBuffer::Initialize() {
+ CHECK_GE(size_, 0);
+ data_.reset(reinterpret_cast<uint8*>(
+ base::AlignedAlloc(size_ + kPaddingSize, kAlignmentSize)));
+ memset(data_.get() + size_, 0, kPaddingSize);
+ if (side_data_size_ > 0) {
+ side_data_.reset(reinterpret_cast<uint8*>(
+ base::AlignedAlloc(side_data_size_ + kPaddingSize, kAlignmentSize)));
+ memset(side_data_.get() + side_data_size_, 0, kPaddingSize);
+ }
+}
+
+// static
+scoped_refptr<DecoderBuffer> DecoderBuffer::CopyFrom(const uint8* data,
+ int data_size) {
+ // If you hit this CHECK you likely have a bug in a demuxer. Go fix it.
+ CHECK(data);
+ return make_scoped_refptr(new DecoderBuffer(data, data_size, NULL, 0));
+}
+
+// static
+scoped_refptr<DecoderBuffer> DecoderBuffer::CopyFrom(const uint8* data,
+ int data_size,
+ const uint8* side_data,
+ int side_data_size) {
+ // If you hit this CHECK you likely have a bug in a demuxer. Go fix it.
+ CHECK(data);
+ CHECK(side_data);
+ return make_scoped_refptr(new DecoderBuffer(data, data_size,
+ side_data, side_data_size));
+}
+
+// static
+scoped_refptr<DecoderBuffer> DecoderBuffer::CreateEOSBuffer() {
+ return make_scoped_refptr(new DecoderBuffer(NULL, 0, NULL, 0));
+}
+
+std::string DecoderBuffer::AsHumanReadableString() {
+ if (end_of_stream()) {
+ return "end of stream";
+ }
+
+ std::ostringstream s;
+ s << "timestamp: " << timestamp_.InMicroseconds()
+ << " duration: " << duration_.InMicroseconds()
+ << " size: " << size_
+ << " side_data_size: " << side_data_size_
+ << " encrypted: " << (decrypt_config_ != NULL);
+ return s.str();
+}
+
+} // namespace media
diff --git a/chromium/media/base/decoder_buffer.h b/chromium/media/base/decoder_buffer.h
new file mode 100644
index 00000000000..6cf519f4c1d
--- /dev/null
+++ b/chromium/media/base/decoder_buffer.h
@@ -0,0 +1,154 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_DECODER_BUFFER_H_
+#define MEDIA_BASE_DECODER_BUFFER_H_
+
+#include <string>
+
+#include "base/logging.h"
+#include "base/memory/aligned_memory.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+#include "media/base/decrypt_config.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+// A specialized buffer for interfacing with audio / video decoders.
+//
+// Specifically ensures that data is aligned and padded as required by the
+// underlying decoding framework. On desktop platforms this means memory is
+// allocated with the particular alignment and padding that FFmpeg requires.
+//
+// Also includes decoder-specific functionality for decryption.
+//
+// NOTE: It is illegal to call any method when end_of_stream() is true.
+class MEDIA_EXPORT DecoderBuffer
+ : public base::RefCountedThreadSafe<DecoderBuffer> {
+ public:
+ enum {
+ kPaddingSize = 16,
+#if defined(ARCH_CPU_ARM_FAMILY)
+ kAlignmentSize = 16
+#else
+ kAlignmentSize = 32
+#endif
+ };
+
+ // Allocates a buffer with |size| >= 0. The buffer will be padded and
+ // aligned as necessary.
+ explicit DecoderBuffer(int size);
+
+ // Create a DecoderBuffer whose |data_| is copied from |data|. Buffer will be
+ // padded and aligned as necessary. |data| must not be NULL and |size| >= 0.
+ static scoped_refptr<DecoderBuffer> CopyFrom(const uint8* data, int size);
+
+ // Create a DecoderBuffer whose |data_| is copied from |data| and
+ // |side_data_| is copied from |side_data|. Buffers will be padded and
+ // aligned as necessary. Data pointers must not be NULL and sizes must be
+ // >= 0.
+ static scoped_refptr<DecoderBuffer> CopyFrom(const uint8* data, int size,
+ const uint8* side_data,
+ int side_data_size);
+
+ // Create a DecoderBuffer indicating we've reached end of stream.
+ //
+ // Calling any method other than end_of_stream() on the resulting buffer
+ // is disallowed.
+ static scoped_refptr<DecoderBuffer> CreateEOSBuffer();
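+
+ // Example (illustrative sketch; |data| and |size| stand for hypothetical
+ // demuxed packet bytes):
+ //   scoped_refptr<DecoderBuffer> buffer = DecoderBuffer::CopyFrom(data, size);
+ //   buffer->set_timestamp(base::TimeDelta::FromMilliseconds(40));
+ //   scoped_refptr<DecoderBuffer> eos = DecoderBuffer::CreateEOSBuffer();
+ //   DCHECK(eos->end_of_stream());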
+
+ base::TimeDelta timestamp() const {
+ DCHECK(!end_of_stream());
+ return timestamp_;
+ }
+
+ void set_timestamp(const base::TimeDelta& timestamp) {
+ DCHECK(!end_of_stream());
+ timestamp_ = timestamp;
+ }
+
+ base::TimeDelta duration() const {
+ DCHECK(!end_of_stream());
+ return duration_;
+ }
+
+ void set_duration(const base::TimeDelta& duration) {
+ DCHECK(!end_of_stream());
+ duration_ = duration;
+ }
+
+ const uint8* data() const {
+ DCHECK(!end_of_stream());
+ return data_.get();
+ }
+
+ uint8* writable_data() const {
+ DCHECK(!end_of_stream());
+ return data_.get();
+ }
+
+ int data_size() const {
+ DCHECK(!end_of_stream());
+ return size_;
+ }
+
+ const uint8* side_data() const {
+ DCHECK(!end_of_stream());
+ return side_data_.get();
+ }
+
+ int side_data_size() const {
+ DCHECK(!end_of_stream());
+ return side_data_size_;
+ }
+
+ const DecryptConfig* decrypt_config() const {
+ DCHECK(!end_of_stream());
+ return decrypt_config_.get();
+ }
+
+ void set_decrypt_config(scoped_ptr<DecryptConfig> decrypt_config) {
+ DCHECK(!end_of_stream());
+ decrypt_config_ = decrypt_config.Pass();
+ }
+
+ // If there's no data in this buffer, it represents end of stream.
+ bool end_of_stream() const {
+ return data_ == NULL;
+ }
+
+ // Returns a human-readable string describing |*this|.
+ std::string AsHumanReadableString();
+
+ protected:
+ friend class base::RefCountedThreadSafe<DecoderBuffer>;
+
+ // Allocates a buffer of size |size| >= 0 and copies |data| into it. Buffer
+ // will be padded and aligned as necessary. If |data| is NULL then |data_| is
+ // set to NULL and |size_| to 0.
+ DecoderBuffer(const uint8* data, int size,
+ const uint8* side_data, int side_data_size);
+ virtual ~DecoderBuffer();
+
+ private:
+ base::TimeDelta timestamp_;
+ base::TimeDelta duration_;
+
+ int size_;
+ scoped_ptr<uint8, base::ScopedPtrAlignedFree> data_;
+ int side_data_size_;
+ scoped_ptr<uint8, base::ScopedPtrAlignedFree> side_data_;
+ scoped_ptr<DecryptConfig> decrypt_config_;
+
+ // Constructor helper method for memory allocations.
+ void Initialize();
+
+ DISALLOW_COPY_AND_ASSIGN(DecoderBuffer);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_DECODER_BUFFER_H_
diff --git a/chromium/media/base/decoder_buffer_queue.cc b/chromium/media/base/decoder_buffer_queue.cc
new file mode 100644
index 00000000000..d0486cbf939
--- /dev/null
+++ b/chromium/media/base/decoder_buffer_queue.cc
@@ -0,0 +1,79 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/decoder_buffer_queue.h"
+
+#include "base/logging.h"
+#include "media/base/buffers.h"
+#include "media/base/decoder_buffer.h"
+
+namespace media {
+
+DecoderBufferQueue::DecoderBufferQueue()
+ : earliest_valid_timestamp_(kNoTimestamp()) {
+}
+
+DecoderBufferQueue::~DecoderBufferQueue() {}
+
+void DecoderBufferQueue::Push(const scoped_refptr<DecoderBuffer>& buffer) {
+ CHECK(!buffer->end_of_stream());
+
+ queue_.push_back(buffer);
+
+ // TODO(scherkus): FFmpeg returns some packets with no timestamp after
+ // seeking. Fix and turn this into CHECK(). See http://crbug.com/162192
+ if (buffer->timestamp() == kNoTimestamp()) {
+ DVLOG(1) << "Buffer has no timestamp";
+ return;
+ }
+
+ if (earliest_valid_timestamp_ == kNoTimestamp()) {
+ earliest_valid_timestamp_ = buffer->timestamp();
+ }
+
+ if (buffer->timestamp() < earliest_valid_timestamp_) {
+ DVLOG(1)
+ << "Out of order timestamps: "
+ << buffer->timestamp().InMicroseconds()
+ << " vs. "
+ << earliest_valid_timestamp_.InMicroseconds();
+ return;
+ }
+
+ earliest_valid_timestamp_ = buffer->timestamp();
+ in_order_queue_.push_back(buffer);
+}
+
+scoped_refptr<DecoderBuffer> DecoderBufferQueue::Pop() {
+ scoped_refptr<DecoderBuffer> buffer = queue_.front();
+ queue_.pop_front();
+
+ if (!in_order_queue_.empty() &&
+ in_order_queue_.front().get() == buffer.get()) {
+ in_order_queue_.pop_front();
+ }
+
+ return buffer;
+}
+
+void DecoderBufferQueue::Clear() {
+ queue_.clear();
+ in_order_queue_.clear();
+ earliest_valid_timestamp_ = kNoTimestamp();
+}
+
+bool DecoderBufferQueue::IsEmpty() {
+ return queue_.empty();
+}
+
+base::TimeDelta DecoderBufferQueue::Duration() {
+ if (in_order_queue_.size() < 2)
+ return base::TimeDelta();
+
+ base::TimeDelta start = in_order_queue_.front()->timestamp();
+ base::TimeDelta end = in_order_queue_.back()->timestamp();
+ return end - start;
+}
+
+} // namespace media
diff --git a/chromium/media/base/decoder_buffer_queue.h b/chromium/media/base/decoder_buffer_queue.h
new file mode 100644
index 00000000000..938db63123e
--- /dev/null
+++ b/chromium/media/base/decoder_buffer_queue.h
@@ -0,0 +1,70 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_DECODER_BUFFER_QUEUE_H_
+#define MEDIA_BASE_DECODER_BUFFER_QUEUE_H_
+
+#include <deque>
+
+#include "base/memory/ref_counted.h"
+#include "base/time/time.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+class DecoderBuffer;
+
+// Maintains a queue of DecoderBuffers in increasing timestamp order.
+//
+// Individual buffer durations are ignored when calculating the duration of
+// the queue, i.e., the queue must have at least 2 in-order buffers to
+// calculate a duration.
+//
+// Not thread safe: access must be externally synchronized.
+class MEDIA_EXPORT DecoderBufferQueue {
+ public:
+ DecoderBufferQueue();
+ ~DecoderBufferQueue();
+
+ // Push |buffer| to the end of the queue. If |buffer| is queued out of order
+ // it will be excluded from duration calculations.
+ //
+ // It is invalid to push an end-of-stream |buffer|.
+ void Push(const scoped_refptr<DecoderBuffer>& buffer);
+
+ // Pops a DecoderBuffer from the front of the queue.
+ //
+ // It is invalid to call Pop() on an empty queue.
+ scoped_refptr<DecoderBuffer> Pop();
+
+ // Removes all queued buffers.
+ void Clear();
+
+ // Returns true if this queue is empty.
+ bool IsEmpty();
+
+ // Returns the duration of encoded data stored in this queue as measured by
+ // the timestamps of the earliest and latest in-order buffers, ignoring
+ // out-of-order buffers.
+ //
+ // Returns zero if the queue contains fewer than two in-order buffers.
+ base::TimeDelta Duration();
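+
+ // Example (illustrative sketch; buffer_at_10s/12s/8s are hypothetical
+ // buffers with those timestamps):
+ //   DecoderBufferQueue q;
+ //   q.Push(buffer_at_10s);
+ //   q.Push(buffer_at_12s);
+ //   q.Push(buffer_at_8s);  // Out of order: excluded from Duration().
+ //   q.Duration();          // == 2 seconds.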
+
+ private:
+ typedef std::deque<scoped_refptr<DecoderBuffer> > Queue;
+ Queue queue_;
+
+ // A subset of |queue_| that contains buffers that are in strictly
+ // increasing timestamp order. Used to calculate Duration() while ignoring
+ // out-of-order buffers.
+ Queue in_order_queue_;
+
+ base::TimeDelta earliest_valid_timestamp_;
+
+ DISALLOW_COPY_AND_ASSIGN(DecoderBufferQueue);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_DECODER_BUFFER_QUEUE_H_
diff --git a/chromium/media/base/decoder_buffer_queue_unittest.cc b/chromium/media/base/decoder_buffer_queue_unittest.cc
new file mode 100644
index 00000000000..32e62db06b1
--- /dev/null
+++ b/chromium/media/base/decoder_buffer_queue_unittest.cc
@@ -0,0 +1,138 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/buffers.h"
+#include "media/base/decoder_buffer.h"
+#include "media/base/decoder_buffer_queue.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+static base::TimeDelta ToTimeDelta(int seconds) {
+ if (seconds < 0)
+ return kNoTimestamp();
+ return base::TimeDelta::FromSeconds(seconds);
+}
+
+// Helper to create buffers with a specified timestamp in seconds.
+//
+// Negative numbers will be converted to kNoTimestamp().
+static scoped_refptr<DecoderBuffer> CreateBuffer(int timestamp) {
+ scoped_refptr<DecoderBuffer> buffer = new DecoderBuffer(0);
+ buffer->set_timestamp(ToTimeDelta(timestamp));
+ buffer->set_duration(ToTimeDelta(0));
+ return buffer;
+}
+
+TEST(DecoderBufferQueueTest, IsEmpty) {
+ DecoderBufferQueue queue;
+ EXPECT_TRUE(queue.IsEmpty());
+
+ queue.Push(CreateBuffer(0));
+ EXPECT_FALSE(queue.IsEmpty());
+}
+
+TEST(DecoderBufferQueueTest, Clear) {
+ DecoderBufferQueue queue;
+ queue.Push(CreateBuffer(0));
+ queue.Push(CreateBuffer(1));
+ EXPECT_FALSE(queue.IsEmpty());
+ EXPECT_EQ(1, queue.Duration().InSeconds());
+
+ queue.Clear();
+ EXPECT_TRUE(queue.IsEmpty());
+ EXPECT_EQ(0, queue.Duration().InSeconds());
+}
+
+TEST(DecoderBufferQueueTest, Duration) {
+ DecoderBufferQueue queue;
+ EXPECT_EQ(0, queue.Duration().InSeconds());
+
+ queue.Push(CreateBuffer(0));
+ EXPECT_EQ(0, queue.Duration().InSeconds());
+
+ queue.Push(CreateBuffer(1));
+ EXPECT_EQ(1, queue.Duration().InSeconds());
+
+ queue.Push(CreateBuffer(2));
+ EXPECT_EQ(2, queue.Duration().InSeconds());
+
+ queue.Push(CreateBuffer(4));
+ EXPECT_EQ(4, queue.Duration().InSeconds());
+
+ queue.Pop();
+ EXPECT_EQ(3, queue.Duration().InSeconds());
+
+ queue.Pop();
+ EXPECT_EQ(2, queue.Duration().InSeconds());
+
+ queue.Pop();
+ EXPECT_EQ(0, queue.Duration().InSeconds());
+
+ queue.Pop();
+ EXPECT_EQ(0, queue.Duration().InSeconds());
+}
+
+TEST(DecoderBufferQueueTest, Duration_OutOfOrder) {
+ DecoderBufferQueue queue;
+ queue.Push(CreateBuffer(10));
+ queue.Push(CreateBuffer(12));
+ EXPECT_EQ(2, queue.Duration().InSeconds());
+
+ // Out of order: duration shouldn't change.
+ queue.Push(CreateBuffer(8));
+ EXPECT_EQ(2, queue.Duration().InSeconds());
+
+ // Removing first buffer should leave the second buffer as the only buffer
+ // included in the duration calculation.
+ queue.Pop();
+ EXPECT_EQ(0, queue.Duration().InSeconds());
+
+ // Removing second buffer leaves the out-of-order buffer. It shouldn't be
+ // included in duration calculations.
+ queue.Pop();
+ EXPECT_EQ(0, queue.Duration().InSeconds());
+
+ // Push a still-too-early buffer. It shouldn't be included in duration
+ // calculations.
+ queue.Push(CreateBuffer(11));
+ EXPECT_EQ(0, queue.Duration().InSeconds());
+
+ // Push a buffer that's after the earliest valid time. It's a singular valid
+ // buffer so duration is still zero.
+ queue.Push(CreateBuffer(14));
+ EXPECT_EQ(0, queue.Duration().InSeconds());
+
+ // Push a second valid buffer. We should now have a duration.
+ queue.Push(CreateBuffer(17));
+ EXPECT_EQ(3, queue.Duration().InSeconds());
+}
+
+TEST(DecoderBufferQueueTest, Duration_NoTimestamp) {
+ // Buffers with no timestamp don't affect duration.
+ DecoderBufferQueue queue;
+ queue.Push(CreateBuffer(0));
+ queue.Push(CreateBuffer(4));
+ EXPECT_EQ(4, queue.Duration().InSeconds());
+
+ queue.Push(CreateBuffer(-1));
+ EXPECT_EQ(4, queue.Duration().InSeconds());
+
+ queue.Push(CreateBuffer(6));
+ EXPECT_EQ(6, queue.Duration().InSeconds());
+
+ queue.Pop();
+ EXPECT_EQ(2, queue.Duration().InSeconds());
+
+ queue.Pop();
+ EXPECT_EQ(0, queue.Duration().InSeconds());
+
+ queue.Pop();
+ EXPECT_EQ(0, queue.Duration().InSeconds());
+
+ queue.Pop();
+ EXPECT_EQ(0, queue.Duration().InSeconds());
+}
+
+} // namespace media
diff --git a/chromium/media/base/decoder_buffer_unittest.cc b/chromium/media/base/decoder_buffer_unittest.cc
new file mode 100644
index 00000000000..c5a03b78450
--- /dev/null
+++ b/chromium/media/base/decoder_buffer_unittest.cc
@@ -0,0 +1,100 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/string_util.h"
+#include "media/base/decoder_buffer.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+TEST(DecoderBufferTest, Constructors) {
+ scoped_refptr<DecoderBuffer> buffer(new DecoderBuffer(0));
+ EXPECT_TRUE(buffer->data());
+ EXPECT_EQ(0, buffer->data_size());
+ EXPECT_FALSE(buffer->end_of_stream());
+
+ const int kTestSize = 10;
+ scoped_refptr<DecoderBuffer> buffer3(new DecoderBuffer(kTestSize));
+ ASSERT_TRUE(buffer3.get());
+ EXPECT_EQ(kTestSize, buffer3->data_size());
+}
+
+TEST(DecoderBufferTest, CreateEOSBuffer) {
+ scoped_refptr<DecoderBuffer> buffer(DecoderBuffer::CreateEOSBuffer());
+ EXPECT_TRUE(buffer->end_of_stream());
+}
+
+TEST(DecoderBufferTest, CopyFrom) {
+ const uint8 kData[] = "hello";
+ const int kDataSize = arraysize(kData);
+ scoped_refptr<DecoderBuffer> buffer2(DecoderBuffer::CopyFrom(
+ reinterpret_cast<const uint8*>(&kData), kDataSize));
+ ASSERT_TRUE(buffer2.get());
+ EXPECT_NE(kData, buffer2->data());
+ EXPECT_EQ(buffer2->data_size(), kDataSize);
+ EXPECT_EQ(0, memcmp(buffer2->data(), kData, kDataSize));
+ EXPECT_FALSE(buffer2->end_of_stream());
+ scoped_refptr<DecoderBuffer> buffer3(DecoderBuffer::CopyFrom(
+ reinterpret_cast<const uint8*>(&kData), kDataSize,
+ reinterpret_cast<const uint8*>(&kData), kDataSize));
+ ASSERT_TRUE(buffer3.get());
+ EXPECT_NE(kData, buffer3->data());
+ EXPECT_EQ(buffer3->data_size(), kDataSize);
+ EXPECT_EQ(0, memcmp(buffer3->data(), kData, kDataSize));
+ EXPECT_NE(kData, buffer3->side_data());
+ EXPECT_EQ(buffer3->side_data_size(), kDataSize);
+ EXPECT_EQ(0, memcmp(buffer3->side_data(), kData, kDataSize));
+ EXPECT_FALSE(buffer3->end_of_stream());
+}
+
+#if !defined(OS_ANDROID)
+TEST(DecoderBufferTest, PaddingAlignment) {
+ const uint8 kData[] = "hello";
+ const int kDataSize = arraysize(kData);
+ scoped_refptr<DecoderBuffer> buffer2(DecoderBuffer::CopyFrom(
+ reinterpret_cast<const uint8*>(&kData), kDataSize));
+ ASSERT_TRUE(buffer2.get());
+
+ // Padding data should always be zeroed.
+ for (int i = 0; i < DecoderBuffer::kPaddingSize; i++)
+ EXPECT_EQ((buffer2->data() + kDataSize)[i], 0);
+
+ // If the data is padded correctly we should be able to read and write past
+ // the end of the data by DecoderBuffer::kPaddingSize bytes without crashing
+ // or Valgrind/ASAN throwing errors.
+ const uint8 kFillChar = 0xFF;
+ memset(
+ buffer2->writable_data() + kDataSize, kFillChar,
+ DecoderBuffer::kPaddingSize);
+ for (int i = 0; i < DecoderBuffer::kPaddingSize; i++)
+ EXPECT_EQ((buffer2->data() + kDataSize)[i], kFillChar);
+
+ EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(
+ buffer2->data()) & (DecoderBuffer::kAlignmentSize - 1));
+}
+#endif
+
+TEST(DecoderBufferTest, ReadingWriting) {
+ const char kData[] = "hello";
+ const int kDataSize = arraysize(kData);
+
+ scoped_refptr<DecoderBuffer> buffer(new DecoderBuffer(kDataSize));
+ ASSERT_TRUE(buffer.get());
+
+ uint8* data = buffer->writable_data();
+ ASSERT_TRUE(data);
+ ASSERT_EQ(kDataSize, buffer->data_size());
+ memcpy(data, kData, kDataSize);
+ const uint8* read_only_data = buffer->data();
+ ASSERT_EQ(data, read_only_data);
+ ASSERT_EQ(0, memcmp(read_only_data, kData, kDataSize));
+ EXPECT_FALSE(buffer->end_of_stream());
+}
+
+TEST(DecoderBufferTest, GetDecryptConfig) {
+ scoped_refptr<DecoderBuffer> buffer(new DecoderBuffer(0));
+ EXPECT_FALSE(buffer->decrypt_config());
+}
+
+} // namespace media
diff --git a/chromium/media/base/decrypt_config.cc b/chromium/media/base/decrypt_config.cc
new file mode 100644
index 00000000000..53e20143e1b
--- /dev/null
+++ b/chromium/media/base/decrypt_config.cc
@@ -0,0 +1,27 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/decrypt_config.h"
+
+#include "base/logging.h"
+
+namespace media {
+
+DecryptConfig::DecryptConfig(const std::string& key_id,
+ const std::string& iv,
+ const int data_offset,
+ const std::vector<SubsampleEntry>& subsamples)
+ : key_id_(key_id),
+ iv_(iv),
+ data_offset_(data_offset),
+ subsamples_(subsamples) {
+ CHECK_GT(key_id.size(), 0u);
+ CHECK(iv.size() == static_cast<size_t>(DecryptConfig::kDecryptionKeySize) ||
+ iv.empty());
+ CHECK_GE(data_offset, 0);
+}
+
+DecryptConfig::~DecryptConfig() {}
+
+} // namespace media
diff --git a/chromium/media/base/decrypt_config.h b/chromium/media/base/decrypt_config.h
new file mode 100644
index 00000000000..be0bb4d61b1
--- /dev/null
+++ b/chromium/media/base/decrypt_config.h
@@ -0,0 +1,80 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_DECRYPT_CONFIG_H_
+#define MEDIA_BASE_DECRYPT_CONFIG_H_
+
+#include <string>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/memory/scoped_ptr.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+// The Common Encryption spec provides for subsample encryption, where portions
+// of a sample are set in cleartext. A SubsampleEntry specifies the number of
+// clear and encrypted bytes in each subsample. For decryption, all of the
+// encrypted bytes in a sample should be considered a single logical stream,
+// regardless of how they are divided into subsamples, and the clear bytes
+// should not be considered as part of decryption. This is logically equivalent
+// to concatenating all 'cypher_bytes' portions of subsamples, decrypting that
+// result, and then copying each byte from the decrypted block over the
+// position of the corresponding encrypted byte.
+struct SubsampleEntry {
+ uint32 clear_bytes;
+ uint32 cypher_bytes;
+};
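+
+// Example (illustrative): a sample laid out as
+//   |clear1|cipher1|clear2|cipher2|
+// is described by two SubsampleEntry values, {clear1, cipher1} and
+// {clear2, cipher2}; the decryptor conceptually decrypts the concatenation
+// cipher1 + cipher2 as a single stream.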
+
+// Contains all information that a decryptor needs to decrypt a media sample.
+class MEDIA_EXPORT DecryptConfig {
+ public:
+ // Keys are always 128 bits.
+ static const int kDecryptionKeySize = 16;
+
+ // |key_id| is the ID that references the decryption key for this sample.
+ // |iv| is the initialization vector defined by the encrypted format.
+ // Currently |iv| must be 16 bytes as defined by WebM and ISO, or empty,
+ // which signals an unencrypted frame.
+ // |data_offset| is the amount of data that should be discarded from the
+ // head of the sample buffer before applying subsample information. A
+ // decrypted buffer will be shorter than an encrypted buffer by this amount.
+ // |subsamples| defines the clear and encrypted portions of the sample as
+ // described above. A decrypted buffer will be equal in size to the sum
+ // of the subsample sizes.
+ //
+ // |data_offset| is applied before |subsamples|.
+ DecryptConfig(const std::string& key_id,
+ const std::string& iv,
+ const int data_offset,
+ const std::vector<SubsampleEntry>& subsamples);
+ ~DecryptConfig();
+
+ const std::string& key_id() const { return key_id_; }
+ const std::string& iv() const { return iv_; }
+ int data_offset() const { return data_offset_; }
+ const std::vector<SubsampleEntry>& subsamples() const { return subsamples_; }
+
+ private:
+ const std::string key_id_;
+
+ // Initialization vector.
+ const std::string iv_;
+
+ // TODO(fgalligan): Remove |data_offset_| if there is no plan to use it in
+ // the future.
+ // Amount of data to be discarded before applying subsample information.
+ const int data_offset_;
+
+ // Subsample information. May be empty for some formats, meaning entire frame
+ // (less data ignored by data_offset_) is encrypted.
+ const std::vector<SubsampleEntry> subsamples_;
+
+ DISALLOW_COPY_AND_ASSIGN(DecryptConfig);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_DECRYPT_CONFIG_H_
diff --git a/chromium/media/base/decryptor.cc b/chromium/media/base/decryptor.cc
new file mode 100644
index 00000000000..e9b232ded71
--- /dev/null
+++ b/chromium/media/base/decryptor.cc
@@ -0,0 +1,13 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/decryptor.h"
+
+namespace media {
+
+Decryptor::Decryptor() {}
+
+Decryptor::~Decryptor() {}
+
+} // namespace media
diff --git a/chromium/media/base/decryptor.h b/chromium/media/base/decryptor.h
new file mode 100644
index 00000000000..2c2cf793639
--- /dev/null
+++ b/chromium/media/base/decryptor.h
@@ -0,0 +1,178 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_DECRYPTOR_H_
+#define MEDIA_BASE_DECRYPTOR_H_
+
+#include <list>
+
+#include "base/basictypes.h"
+#include "base/callback.h"
+#include "base/memory/ref_counted.h"
+#include "media/base/audio_buffer.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+class AudioDecoderConfig;
+class DecoderBuffer;
+class VideoDecoderConfig;
+class VideoFrame;
+
+// Decrypts (and decodes) an encrypted buffer.
+//
+// All methods are called on the (video/audio) decoder thread. Decryptor
+// implementations must be thread safe when methods are called this way.
+// Depending on the implementation, callbacks may be fired synchronously or
+// asynchronously.
+class MEDIA_EXPORT Decryptor {
+ public:
+ // TODO(xhwang): Replace kError with kDecryptError and kDecodeError.
+ // TODO(xhwang): Replace kNeedMoreData with kNotEnoughData.
+ enum Status {
+ kSuccess, // Decryption successfully completed. Decrypted buffer ready.
+ kNoKey, // No key is available to decrypt.
+ kNeedMoreData, // Decoder needs more data to produce a frame.
+ kError // Key is available but an error occurred during decryption.
+ };
+
+ // TODO(xhwang): Unify this with DemuxerStream::Type.
+ enum StreamType {
+ kAudio,
+ kVideo
+ };
+
+ Decryptor();
+ virtual ~Decryptor();
+
+ // Indicates that a new key has been added to the MediaKeys object associated
+ // with the Decryptor.
+ typedef base::Callback<void()> NewKeyCB;
+
+ // Registers a NewKeyCB which should be called when a new key is added to the
+ // decryptor. Only one NewKeyCB can be registered per |stream_type|.
+ // If this function is called multiple times for the same |stream_type|, the
+ // previously registered callback will be replaced; in particular,
+ // registering a null callback cancels the previously registered callback.
+ virtual void RegisterNewKeyCB(StreamType stream_type,
+ const NewKeyCB& key_added_cb) = 0;
+
+ // Indicates completion of a decryption operation.
+ //
+ // First parameter: The status of the decryption operation.
+ // - Set to kSuccess if the encrypted buffer is successfully decrypted and
+ // the decrypted buffer is ready to be read.
+ // - Set to kNoKey if no decryption key is available to decrypt the encrypted
+ // buffer. In this case the decrypted buffer must be NULL.
+ // - Set to kError if an unexpected error has occurred. In this case the
+ // decrypted buffer must be NULL.
+ // - This parameter should not be set to kNeedMoreData.
+ // Second parameter: The decrypted buffer.
+ typedef base::Callback<void(Status,
+ const scoped_refptr<DecoderBuffer>&)> DecryptCB;
+
+ // Decrypts the |encrypted| buffer. The decrypt status and decrypted buffer
+ // are returned via the provided callback |decrypt_cb|. The |encrypted| buffer
+ // must not be NULL.
+ // Decrypt() should not be called until any previous DecryptCB of the same
+ // |stream_type| has completed. Thus, only one DecryptCB may be pending at
+ // a time for a given |stream_type|.
+ virtual void Decrypt(StreamType stream_type,
+ const scoped_refptr<DecoderBuffer>& encrypted,
+ const DecryptCB& decrypt_cb) = 0;
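+
+ // Example (illustrative sketch; |decryptor|, |encrypted_buffer| and
+ // OnDecrypted() are hypothetical caller-side names):
+ //   decryptor->Decrypt(Decryptor::kVideo, encrypted_buffer,
+ //                      base::Bind(&OnDecrypted));
+ //   // OnDecrypted(Status, const scoped_refptr<DecoderBuffer>&) would check
+ //   // for kNoKey and retry once the registered NewKeyCB fires.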
+
+ // Cancels the scheduled decryption operation for |stream_type| and fires the
+ // pending DecryptCB immediately with kSuccess and NULL.
+ // Decrypt() should not be called again before the pending DecryptCB for the
+ // same |stream_type| is fired.
+ virtual void CancelDecrypt(StreamType stream_type) = 0;
+
+ // Indicates completion of audio/video decoder initialization.
+ //
+ // First parameter: Indicates initialization success.
+ // - Set to true if initialization was successful, false if an error occurred.
+ typedef base::Callback<void(bool)> DecoderInitCB;
+
+ // Initializes a decoder with the given |config|, executing the |init_cb|
+ // upon completion.
+ virtual void InitializeAudioDecoder(const AudioDecoderConfig& config,
+ const DecoderInitCB& init_cb) = 0;
+ virtual void InitializeVideoDecoder(const VideoDecoderConfig& config,
+ const DecoderInitCB& init_cb) = 0;
+
+ // Helper structure for managing multiple decoded audio buffers per input.
+ // TODO(xhwang): Rename this to AudioFrames.
+ typedef std::list<scoped_refptr<AudioBuffer> > AudioBuffers;
+
+ // Indicates completion of audio/video decrypt-and-decode operation.
+ //
+ // First parameter: The status of the decrypt-and-decode operation.
+ // - Set to kSuccess if the encrypted buffer is successfully decrypted and
+ // decoded. In this case, the decoded frame/buffers can be/contain:
+ // 1) NULL, which means the operation has been aborted.
+ // 2) End-of-stream (EOS) frame, which means that the decoder has hit EOS,
+ // flushed all internal buffers and cannot produce more video frames.
+ // 3) Decrypted and decoded video frame or audio buffer.
+ // - Set to kNoKey if no decryption key is available to decrypt the encrypted
+ // buffer. In this case the returned frame(s) must be NULL/empty.
+ // - Set to kNeedMoreData if more data is needed to produce a frame or
+ // buffer. In this case the returned frame(s) must be NULL/empty.
+ // - Set to kError if an unexpected error has occurred. In this case the
+ // returned frame(s) must be NULL/empty.
+ // Second parameter: The decoded video frame or audio buffers.
+ typedef base::Callback<void(Status, const AudioBuffers&)> AudioDecodeCB;
+ typedef base::Callback<void(Status,
+ const scoped_refptr<VideoFrame>&)> VideoDecodeCB;
+
+ // Decrypts and decodes the |encrypted| buffer. The status and the decrypted
+ // buffer are returned via the provided callback.
+ // The |encrypted| buffer must not be NULL.
+ // At end-of-stream, this method should be called repeatedly with an
+ // end-of-stream DecoderBuffer until no more frames/buffers can be produced.
+ // These methods can only be called after the corresponding decoder has
+ // been successfully initialized.
+ virtual void DecryptAndDecodeAudio(
+ const scoped_refptr<DecoderBuffer>& encrypted,
+ const AudioDecodeCB& audio_decode_cb) = 0;
+ virtual void DecryptAndDecodeVideo(
+ const scoped_refptr<DecoderBuffer>& encrypted,
+ const VideoDecodeCB& video_decode_cb) = 0;
+
+ // Resets the decoder to an initialized clean state, cancels any scheduled
+ // decrypt-and-decode operations, and fires any pending
+ // AudioDecodeCB/VideoDecodeCB immediately with kError and NULL.
+ // This method can only be called after the corresponding decoder has been
+ // successfully initialized.
+ virtual void ResetDecoder(StreamType stream_type) = 0;
+
+ // Releases decoder resources, deinitializes the decoder, cancels any
+ // scheduled initialization or decrypt-and-decode operations, and fires
+ // any pending DecoderInitCB/AudioDecodeCB/VideoDecodeCB immediately.
+ // DecoderInitCB should be fired with false. AudioDecodeCB/VideoDecodeCB
+ // should be fired with kError.
+ // This method can be called any time after Initialize{Audio|Video}Decoder()
+ // has been called (with the correct stream type).
+ // After this operation, the decoder is set to an uninitialized state.
+ // The decoder can be reinitialized after it is uninitialized.
+ virtual void DeinitializeDecoder(StreamType stream_type) = 0;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(Decryptor);
+};
+
+// Callback to notify that a decryptor is ready.
+typedef base::Callback<void(Decryptor*)> DecryptorReadyCB;
+
+// Callback to set/cancel a DecryptorReadyCB.
+// Calling this callback with a non-null callback registers decryptor ready
+// notification. When the decryptor is ready, notification will be sent
+// through the provided callback.
+// Calling this callback with a null callback cancels previously registered
+// decryptor ready notification. Any previously provided callback will be
+// fired immediately with NULL.
+typedef base::Callback<void(const DecryptorReadyCB&)> SetDecryptorReadyCB;
+
+} // namespace media
+
+#endif // MEDIA_BASE_DECRYPTOR_H_
diff --git a/chromium/media/base/demuxer.cc b/chromium/media/base/demuxer.cc
new file mode 100644
index 00000000000..6cd4e29a481
--- /dev/null
+++ b/chromium/media/base/demuxer.cc
@@ -0,0 +1,31 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/demuxer.h"
+
+#include "base/logging.h"
+
+namespace media {
+
+DemuxerHost::~DemuxerHost() {}
+
+Demuxer::Demuxer() {}
+
+Demuxer::~Demuxer() {}
+
+void Demuxer::SetPlaybackRate(float playback_rate) {}
+
+void Demuxer::Seek(base::TimeDelta time, const PipelineStatusCB& status_cb) {
+ DCHECK(!status_cb.is_null());
+ status_cb.Run(PIPELINE_OK);
+}
+
+void Demuxer::Stop(const base::Closure& callback) {
+ DCHECK(!callback.is_null());
+ callback.Run();
+}
+
+void Demuxer::OnAudioRendererDisabled() {}
+
+} // namespace media
diff --git a/chromium/media/base/demuxer.h b/chromium/media/base/demuxer.h
new file mode 100644
index 00000000000..6a91aab896c
--- /dev/null
+++ b/chromium/media/base/demuxer.h
@@ -0,0 +1,74 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_DEMUXER_H_
+#define MEDIA_BASE_DEMUXER_H_
+
+#include "base/time/time.h"
+#include "media/base/data_source.h"
+#include "media/base/demuxer_stream.h"
+#include "media/base/media_export.h"
+#include "media/base/pipeline_status.h"
+
+namespace media {
+
+class MEDIA_EXPORT DemuxerHost : public DataSourceHost {
+ public:
+ // Sets the duration of the media.
+ // Duration may be kInfiniteDuration() if the duration is not known.
+ virtual void SetDuration(base::TimeDelta duration) = 0;
+
+ // Stops execution of the pipeline due to a fatal error. Do not call this
+ // method with PIPELINE_OK.
+ virtual void OnDemuxerError(PipelineStatus error) = 0;
+
+ protected:
+ virtual ~DemuxerHost();
+};
+
+class MEDIA_EXPORT Demuxer {
+ public:
+ Demuxer();
+ virtual ~Demuxer();
+
+ // Completes initialization of the demuxer.
+ //
+ // The demuxer does not own |host|, which is guaranteed to outlive the
+ // demuxer. Don't delete it!
+ virtual void Initialize(DemuxerHost* host,
+ const PipelineStatusCB& status_cb) = 0;
+
+ // The pipeline playback rate has been changed. Demuxers may implement this
+ // method if they need to respond to this call.
+ virtual void SetPlaybackRate(float playback_rate);
+
+ // Carry out any actions required to seek to the given time, executing the
+ // callback upon completion.
+ virtual void Seek(base::TimeDelta time, const PipelineStatusCB& status_cb);
+
+ // The pipeline is being stopped either as a result of an error or because
+ // the client called Stop().
+ virtual void Stop(const base::Closure& callback);
+
+ // This method is called from the pipeline when the audio renderer
+ // is disabled. Demuxers can ignore the notification if they do not
+ // need to react to this event.
+ //
+ // TODO(acolwell): Change to generic DisableStream(DemuxerStream::Type).
+ // TODO(scherkus): this might not be needed http://crbug.com/234708
+ virtual void OnAudioRendererDisabled();
+
+ // Returns the stream of the given type, or NULL if that type is not present.
+ virtual DemuxerStream* GetStream(DemuxerStream::Type type) = 0;
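+
+ // Example (illustrative sketch; OnAudioRead() is a hypothetical read
+ // callback matching DemuxerStream::ReadCB):
+ //   DemuxerStream* audio = demuxer->GetStream(DemuxerStream::AUDIO);
+ //   if (audio)
+ //     audio->Read(base::Bind(&OnAudioRead));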
+
+ // Returns the starting time for the media file.
+ virtual base::TimeDelta GetStartTime() const = 0;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(Demuxer);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_DEMUXER_H_
diff --git a/chromium/media/base/demuxer_stream.cc b/chromium/media/base/demuxer_stream.cc
new file mode 100644
index 00000000000..daede6553d1
--- /dev/null
+++ b/chromium/media/base/demuxer_stream.cc
@@ -0,0 +1,11 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/demuxer_stream.h"
+
+namespace media {
+
+DemuxerStream::~DemuxerStream() {}
+
+} // namespace media
diff --git a/chromium/media/base/demuxer_stream.h b/chromium/media/base/demuxer_stream.h
new file mode 100644
index 00000000000..bb4534475ed
--- /dev/null
+++ b/chromium/media/base/demuxer_stream.h
@@ -0,0 +1,77 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_DEMUXER_STREAM_H_
+#define MEDIA_BASE_DEMUXER_STREAM_H_
+
+#include "base/callback.h"
+#include "base/memory/ref_counted.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+class AudioDecoderConfig;
+class DecoderBuffer;
+class VideoDecoderConfig;
+
+class MEDIA_EXPORT DemuxerStream {
+ public:
+ enum Type {
+ UNKNOWN,
+ AUDIO,
+ VIDEO,
+ NUM_TYPES, // Always keep this entry as the last one!
+ };
+
+ // Status returned in the Read() callback.
+ // kOk : Indicates the second parameter is non-NULL and contains media data
+ // or the end of the stream.
+ // kAborted : Indicates an aborted Read(). This can happen if the
+ // DemuxerStream gets flushed and doesn't have any more data to
+ // return. The second parameter MUST be NULL when this status is
+ // returned.
+ // kConfigChanged : Indicates that the AudioDecoderConfig or
+ // VideoDecoderConfig for the stream has changed.
+ // The DemuxerStream expects an audio_decoder_config() or
+ // video_decoder_config() call before Read() will start
+ // returning DecoderBuffers again. The decoder will need this
+ // new configuration to properly decode the buffers read
+ // from this point forward. The second parameter MUST be NULL
+ // when this status is returned.
+ enum Status {
+ kOk,
+ kAborted,
+ kConfigChanged,
+ };
+
+ // Requests a buffer to be returned via the provided callback.
+ //
+ // The first parameter indicates the status of the read.
+ // The second parameter is non-NULL and contains media data or signals the
+ // end of the stream when the first parameter is kOk; it is NULL otherwise.
+ typedef base::Callback<void(Status,
+ const scoped_refptr<DecoderBuffer>&)> ReadCB;
+ virtual void Read(const ReadCB& read_cb) = 0;
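+
+ // Example (illustrative sketch of a caller-side read callback; OnRead() is
+ // hypothetical):
+ //   void OnRead(DemuxerStream::Status status,
+ //               const scoped_refptr<DecoderBuffer>& buffer) {
+ //     if (status == DemuxerStream::kConfigChanged) {
+ //       // Re-fetch audio_decoder_config() / video_decoder_config() before
+ //       // issuing another Read().
+ //     }
+ //   }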
+
+ // Returns the audio decoder configuration. It is an error to call this method
+ // if type() != AUDIO.
+ virtual AudioDecoderConfig audio_decoder_config() = 0;
+
+ // Returns the video decoder configuration. It is an error to call this method
+ // if type() != VIDEO.
+ virtual VideoDecoderConfig video_decoder_config() = 0;
+
+ // Returns the type of stream.
+ virtual Type type() = 0;
+
+ virtual void EnableBitstreamConverter() = 0;
+
+ protected:
+ // Only allow concrete implementations to get deleted.
+ virtual ~DemuxerStream();
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_DEMUXER_STREAM_H_
diff --git a/chromium/media/base/djb2.cc b/chromium/media/base/djb2.cc
new file mode 100644
index 00000000000..8d47ed294e0
--- /dev/null
+++ b/chromium/media/base/djb2.cc
@@ -0,0 +1,14 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/djb2.h"
+
+uint32 DJB2Hash(const void* buf, size_t len, uint32 seed) {
+ const uint8* src = reinterpret_cast<const uint8*>(buf);
+ uint32 hash = seed;
+ for (size_t i = 0; i < len; ++i) {
+ hash = hash * 33 + src[i];
+ }
+ return hash;
+}
diff --git a/chromium/media/base/djb2.h b/chromium/media/base/djb2.h
new file mode 100644
index 00000000000..598f9d10e7b
--- /dev/null
+++ b/chromium/media/base/djb2.h
@@ -0,0 +1,41 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_DJB2_H_
+#define MEDIA_BASE_DJB2_H_
+
+#include "base/basictypes.h"
+#include "media/base/media_export.h"
+
+// DJB2 is a hash algorithm with excellent distribution and speed on many
+// different data sets.
+// It has marginally more collisions than FNV1, but makes up for it in
+// performance.
+// The return value is suitable for table lookups.
+// For small fixed sizes (i.e., a pixel), it has low overhead and inlines well.
+// For large data sets, it optimizes into assembly/SIMD and is appropriate
+// for realtime applications.
+// See Also:
+// http://www.cse.yorku.ca/~oz/hash.html
+
+static const uint32 kDJB2HashSeed = 5381u;
+
+// These functions perform DJB2 hash. The simplest call is DJB2Hash() to
+// generate the DJB2 hash of the given data:
+// uint32 hash = DJB2Hash(data1, length1, kDJB2HashSeed);
+//
+// You can also compute the DJB2 hash of data incrementally by making multiple
+// calls to DJB2Hash():
+// uint32 hash_value = kDJB2HashSeed; // Initial seed for DJB2.
+// for (size_t i = 0; i < copy_lines; ++i) {
+// hash_value = DJB2Hash(source, bytes_per_line, hash_value);
+// source += source_stride;
+// }
+
+// Computes the DJB2 hash of the given buffer of data. May be called any
+// number of times during an incremental computation.
+MEDIA_EXPORT uint32 DJB2Hash(const void* buf, size_t len, uint32 seed);
+
+#endif // MEDIA_BASE_DJB2_H_
+
diff --git a/chromium/media/base/djb2_unittest.cc b/chromium/media/base/djb2_unittest.cc
new file mode 100644
index 00000000000..f7898aafa3d
--- /dev/null
+++ b/chromium/media/base/djb2_unittest.cc
@@ -0,0 +1,15 @@
+// Copyright (c) 2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/djb2.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+uint8 kTestData[] = { 1, 2, 3 };
+
+TEST(DJB2HashTest, HashTest) {
+ EXPECT_EQ(DJB2Hash(NULL, 0, 0u), 0u);
+ EXPECT_EQ(DJB2Hash(kTestData, sizeof(kTestData), 5381u),
+ ((5381u * 33u + 1u) * 33u + 2u) * 33u + 3u);
+}
diff --git a/chromium/media/base/fake_audio_render_callback.cc b/chromium/media/base/fake_audio_render_callback.cc
new file mode 100644
index 00000000000..5a0979e9ea0
--- /dev/null
+++ b/chromium/media/base/fake_audio_render_callback.cc
@@ -0,0 +1,50 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// MSVC++ requires this to be set before any other includes to get M_PI.
+#define _USE_MATH_DEFINES
+
+#include <cmath>
+
+#include "media/base/fake_audio_render_callback.h"
+
+namespace media {
+
+FakeAudioRenderCallback::FakeAudioRenderCallback(double step)
+ : half_fill_(false),
+ step_(step),
+ last_audio_delay_milliseconds_(-1),
+ volume_(1) {
+ reset();
+}
+
+FakeAudioRenderCallback::~FakeAudioRenderCallback() {}
+
+int FakeAudioRenderCallback::Render(AudioBus* audio_bus,
+ int audio_delay_milliseconds) {
+ last_audio_delay_milliseconds_ = audio_delay_milliseconds;
+ int number_of_frames = audio_bus->frames();
+ if (half_fill_)
+ number_of_frames /= 2;
+
+ // Fill first channel with a sine wave.
+ for (int i = 0; i < number_of_frames; ++i)
+ audio_bus->channel(0)[i] = sin(2 * M_PI * (x_ + step_ * i));
+ x_ += number_of_frames * step_;
+
+ // Copy first channel into the rest of the channels.
+ for (int i = 1; i < audio_bus->channels(); ++i)
+ memcpy(audio_bus->channel(i), audio_bus->channel(0),
+ number_of_frames * sizeof(*audio_bus->channel(i)));
+
+ return number_of_frames;
+}
+
+double FakeAudioRenderCallback::ProvideInput(AudioBus* audio_bus,
+ base::TimeDelta buffer_delay) {
+ Render(audio_bus, buffer_delay.InMillisecondsF() + 0.5);
+ return volume_;
+}
+
+} // namespace media
diff --git a/chromium/media/base/fake_audio_render_callback.h b/chromium/media/base/fake_audio_render_callback.h
new file mode 100644
index 00000000000..5318c99e851
--- /dev/null
+++ b/chromium/media/base/fake_audio_render_callback.h
@@ -0,0 +1,62 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_FAKE_AUDIO_RENDER_CALLBACK_H_
+#define MEDIA_BASE_FAKE_AUDIO_RENDER_CALLBACK_H_
+
+#include "media/base/audio_converter.h"
+#include "media/base/audio_renderer_sink.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace media {
+
+// Fake RenderCallback which will fill each request with a sine wave. Sine
+// state is kept across callbacks. State can be reset to default via reset().
+// Also provides an AudioConverter::InputCallback interface.
+class FakeAudioRenderCallback
+ : public AudioRendererSink::RenderCallback,
+ public AudioConverter::InputCallback {
+ public:
+ // The function used to fulfill Render() is f(x) = sin(2 * PI * x * |step|),
+ // where x = [|number_of_frames| * m, |number_of_frames| * (m + 1)] and m =
+ // the number of Render() calls fulfilled thus far.
+ explicit FakeAudioRenderCallback(double step);
+ virtual ~FakeAudioRenderCallback();
+
+ // Renders a sine wave into the provided audio data buffer. If |half_fill_|
+ // is set, will only fill half the buffer.
+ virtual int Render(AudioBus* audio_bus,
+ int audio_delay_milliseconds) OVERRIDE;
+ MOCK_METHOD0(OnRenderError, void());
+
+ // AudioConverter::InputCallback implementation.
+ virtual double ProvideInput(AudioBus* audio_bus,
+ base::TimeDelta buffer_delay) OVERRIDE;
+
+ // Toggles filling only half the requested amount during Render().
+ void set_half_fill(bool half_fill) { half_fill_ = half_fill; }
+
+ // Resets the sine state to its initial value.
+ void reset() { x_ = 0; }
+
+ // Returns the last |audio_delay_milliseconds| provided to Render() or -1 if
+ // no Render() call occurred.
+ int last_audio_delay_milliseconds() { return last_audio_delay_milliseconds_; }
+
+ // Set the volume returned by ProvideInput().
+ void set_volume(double volume) { volume_ = volume; }
+
+ private:
+ bool half_fill_;
+ double x_;
+ double step_;
+ int last_audio_delay_milliseconds_;
+ double volume_;
+
+ DISALLOW_COPY_AND_ASSIGN(FakeAudioRenderCallback);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_FAKE_AUDIO_RENDER_CALLBACK_H_
diff --git a/chromium/media/base/filter_collection.cc b/chromium/media/base/filter_collection.cc
new file mode 100644
index 00000000000..730835f1919
--- /dev/null
+++ b/chromium/media/base/filter_collection.cc
@@ -0,0 +1,43 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/filter_collection.h"
+
+#include "media/base/audio_renderer.h"
+#include "media/base/demuxer.h"
+#include "media/base/video_renderer.h"
+
+namespace media {
+
+FilterCollection::FilterCollection() : demuxer_(NULL) {}
+
+FilterCollection::~FilterCollection() {}
+
+void FilterCollection::SetDemuxer(Demuxer* demuxer) {
+ demuxer_ = demuxer;
+}
+
+Demuxer* FilterCollection::GetDemuxer() {
+ return demuxer_;
+}
+
+void FilterCollection::SetAudioRenderer(
+ scoped_ptr<AudioRenderer> audio_renderer) {
+ audio_renderer_ = audio_renderer.Pass();
+}
+
+scoped_ptr<AudioRenderer> FilterCollection::GetAudioRenderer() {
+ return audio_renderer_.Pass();
+}
+
+void FilterCollection::SetVideoRenderer(
+ scoped_ptr<VideoRenderer> video_renderer) {
+ video_renderer_ = video_renderer.Pass();
+}
+
+scoped_ptr<VideoRenderer> FilterCollection::GetVideoRenderer() {
+ return video_renderer_.Pass();
+}
+
+} // namespace media
diff --git a/chromium/media/base/filter_collection.h b/chromium/media/base/filter_collection.h
new file mode 100644
index 00000000000..90ea0669446
--- /dev/null
+++ b/chromium/media/base/filter_collection.h
@@ -0,0 +1,46 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_FILTER_COLLECTION_H_
+#define MEDIA_BASE_FILTER_COLLECTION_H_
+
+#include "base/memory/scoped_ptr.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+class AudioRenderer;
+class Demuxer;
+class VideoRenderer;
+
+// Represents a set of uninitialized demuxer and audio/video decoders and
+// renderers. Used to start a Pipeline object for media playback.
+//
+// TODO(scherkus): Replace FilterCollection with something sensible, see
+// http://crbug.com/110800
+class MEDIA_EXPORT FilterCollection {
+ public:
+ FilterCollection();
+ ~FilterCollection();
+
+ void SetDemuxer(Demuxer* demuxer);
+ Demuxer* GetDemuxer();
+
+ void SetAudioRenderer(scoped_ptr<AudioRenderer> audio_renderer);
+ scoped_ptr<AudioRenderer> GetAudioRenderer();
+
+ void SetVideoRenderer(scoped_ptr<VideoRenderer> video_renderer);
+ scoped_ptr<VideoRenderer> GetVideoRenderer();
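+
+ // Example (illustrative sketch; |demuxer| and the renderers are hypothetical
+ // and must outlive or be consumed by the pipeline):
+ //   FilterCollection collection;
+ //   collection.SetDemuxer(demuxer);
+ //   collection.SetAudioRenderer(audio_renderer.Pass());
+ //   collection.SetVideoRenderer(video_renderer.Pass());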
+
+ private:
+ Demuxer* demuxer_;
+ scoped_ptr<AudioRenderer> audio_renderer_;
+ scoped_ptr<VideoRenderer> video_renderer_;
+
+ DISALLOW_COPY_AND_ASSIGN(FilterCollection);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_FILTER_COLLECTION_H_
diff --git a/chromium/media/base/gmock_callback_support.h b/chromium/media/base/gmock_callback_support.h
new file mode 100644
index 00000000000..22f4c10ad4f
--- /dev/null
+++ b/chromium/media/base/gmock_callback_support.h
@@ -0,0 +1,107 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_GMOCK_CALLBACK_SUPPORT_H_
+#define MEDIA_BASE_GMOCK_CALLBACK_SUPPORT_H_
+
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace media {
+
+// Matchers for base::Callback and base::Closure.
+
+MATCHER(IsNullCallback, "a null callback") {
+ return (arg.is_null());
+}
+
+MATCHER(IsNotNullCallback, "a non-null callback") {
+ return (!arg.is_null());
+}
+
+// The RunClosure<N>() action invokes the Run() method on the N-th (0-based)
+// argument of the mock function.
+
+ACTION_TEMPLATE(RunClosure,
+ HAS_1_TEMPLATE_PARAMS(int, k),
+ AND_0_VALUE_PARAMS()) {
+ ::std::tr1::get<k>(args).Run();
+}
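+
+// Example (illustrative; |mock| is a hypothetical mock object whose Flush()
+// method takes a base::Closure as its first argument):
+//   EXPECT_CALL(mock, Flush(testing::_)).WillOnce(RunClosure<0>());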
+
+// Various overloads for RunCallback<N>().
+//
+// The RunCallback<N>(p1, p2, ..., p_k) action invokes the Run() method on the
+// N-th (0-based) argument of the mock function, with arguments p1, p2, ..., p_k.
+//
+// Notes:
+//
+// 1. The arguments are passed by value by default. If you need to
+// pass an argument by reference, wrap it inside ByRef(). For example,
+//
+// RunCallback<1>(5, string("Hello"), ByRef(foo))
+//
+// passes 5 and string("Hello") by value, and passes foo by reference.
+//
+// 2. If the callback takes an argument by reference but ByRef() is
+// not used, it will receive the reference to a copy of the value,
+// instead of the original value. For example, when the 0-th
+// argument of the callback takes a const string&, the action
+//
+// RunCallback<0>(string("Hello"))
+//
+// makes a copy of the temporary string("Hello") object and passes a
+// reference of the copy, instead of the original temporary object,
+// to the callback. This makes it easy for a user to define a
+// RunCallback action from temporary values and have it performed later.
+
+ACTION_TEMPLATE(RunCallback,
+ HAS_1_TEMPLATE_PARAMS(int, k),
+ AND_0_VALUE_PARAMS()) {
+ return ::std::tr1::get<k>(args).Run();
+}
+
+ACTION_TEMPLATE(RunCallback,
+ HAS_1_TEMPLATE_PARAMS(int, k),
+ AND_1_VALUE_PARAMS(p0)) {
+ return ::std::tr1::get<k>(args).Run(p0);
+}
+
+ACTION_TEMPLATE(RunCallback,
+ HAS_1_TEMPLATE_PARAMS(int, k),
+ AND_2_VALUE_PARAMS(p0, p1)) {
+ return ::std::tr1::get<k>(args).Run(p0, p1);
+}
+
+ACTION_TEMPLATE(RunCallback,
+ HAS_1_TEMPLATE_PARAMS(int, k),
+ AND_3_VALUE_PARAMS(p0, p1, p2)) {
+ return ::std::tr1::get<k>(args).Run(p0, p1, p2);
+}
+
+ACTION_TEMPLATE(RunCallback,
+ HAS_1_TEMPLATE_PARAMS(int, k),
+ AND_4_VALUE_PARAMS(p0, p1, p2, p3)) {
+ return ::std::tr1::get<k>(args).Run(p0, p1, p2, p3);
+}
+
+ACTION_TEMPLATE(RunCallback,
+ HAS_1_TEMPLATE_PARAMS(int, k),
+ AND_5_VALUE_PARAMS(p0, p1, p2, p3, p4)) {
+ return ::std::tr1::get<k>(args).Run(p0, p1, p2, p3, p4);
+}
+
+ACTION_TEMPLATE(RunCallback,
+ HAS_1_TEMPLATE_PARAMS(int, k),
+ AND_6_VALUE_PARAMS(p0, p1, p2, p3, p4, p5)) {
+ return ::std::tr1::get<k>(args).Run(p0, p1, p2, p3, p4, p5);
+}
+
+ACTION_TEMPLATE(RunCallback,
+ HAS_1_TEMPLATE_PARAMS(int, k),
+ AND_7_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6)) {
+ return ::std::tr1::get<k>(args).Run(p0, p1, p2, p3, p4, p5, p6);
+}
+
+} // namespace media
+
+#endif // MEDIA_BASE_GMOCK_CALLBACK_SUPPORT_H_
diff --git a/chromium/media/base/gmock_callback_support_unittest.cc b/chromium/media/base/gmock_callback_support_unittest.cc
new file mode 100644
index 00000000000..fb1beb9870d
--- /dev/null
+++ b/chromium/media/base/gmock_callback_support_unittest.cc
@@ -0,0 +1,84 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/gmock_callback_support.h"
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using testing::ByRef;
+using testing::MockFunction;
+
+namespace media {
+
+typedef base::Callback<void(const bool& src, bool* dst)> TestCallback;
+
+void SetBool(const bool& src, bool* dst) {
+ *dst = src;
+}
+
+TEST(GmockCallbackSupportTest, IsNullCallback) {
+ MockFunction<void(const TestCallback&)> check;
+ EXPECT_CALL(check, Call(IsNullCallback()));
+ check.Call(TestCallback());
+}
+
+TEST(GmockCallbackSupportTest, IsNotNullCallback) {
+ MockFunction<void(const TestCallback&)> check;
+ EXPECT_CALL(check, Call(IsNotNullCallback()));
+ check.Call(base::Bind(&SetBool));
+}
+
+TEST(GmockCallbackSupportTest, RunClosure) {
+ MockFunction<void(const base::Closure&)> check;
+ bool dst = false;
+ EXPECT_CALL(check, Call(IsNotNullCallback()))
+ .WillOnce(RunClosure<0>());
+ check.Call(base::Bind(&SetBool, true, &dst));
+ EXPECT_TRUE(dst);
+}
+
+TEST(GmockCallbackSupportTest, RunCallback0) {
+ MockFunction<void(const TestCallback&)> check;
+ bool dst = false;
+ EXPECT_CALL(check, Call(IsNotNullCallback()))
+ .WillOnce(RunCallback<0>(true, &dst));
+ check.Call(base::Bind(&SetBool));
+ EXPECT_TRUE(dst);
+}
+
+TEST(GmockCallbackSupportTest, RunCallback1) {
+ MockFunction<void(int, const TestCallback&)> check;
+ bool dst = false;
+ EXPECT_CALL(check, Call(0, IsNotNullCallback()))
+ .WillOnce(RunCallback<1>(true, &dst));
+ check.Call(0, base::Bind(&SetBool));
+ EXPECT_TRUE(dst);
+}
+
+TEST(GmockCallbackSupportTest, RunCallbackPassByRef) {
+ MockFunction<void(const TestCallback&)> check;
+ bool dst = false;
+ bool src = false;
+ EXPECT_CALL(check, Call(IsNotNullCallback()))
+ .WillOnce(RunCallback<0>(ByRef(src), &dst));
+ src = true;
+ check.Call(base::Bind(&SetBool));
+ EXPECT_TRUE(dst);
+}
+
+TEST(GmockCallbackSupportTest, RunCallbackPassByValue) {
+ MockFunction<void(const TestCallback&)> check;
+ bool dst = false;
+ bool src = true;
+ EXPECT_CALL(check, Call(IsNotNullCallback()))
+ .WillOnce(RunCallback<0>(src, &dst));
+ src = false;
+ check.Call(base::Bind(&SetBool));
+ EXPECT_TRUE(dst);
+}
+
+} // namespace media
diff --git a/chromium/media/base/limits.h b/chromium/media/base/limits.h
new file mode 100644
index 00000000000..ed7ac513c72
--- /dev/null
+++ b/chromium/media/base/limits.h
@@ -0,0 +1,51 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Contains limit definition constants for the media subsystem.
+
+#ifndef MEDIA_BASE_LIMITS_H_
+#define MEDIA_BASE_LIMITS_H_
+
+#include "base/basictypes.h"
+
+namespace media {
+
+namespace limits {
+
+enum {
+ // Maximum possible dimension (width or height) for any video.
+ kMaxDimension = (1 << 15) - 1, // 32767
+
+ // Maximum possible canvas size (width multiplied by height) for any video.
+ kMaxCanvas = (1 << (14 * 2)), // 16384 x 16384
+
+ // Maximum number of video frames that can be queued in the pipeline.
+ kMaxVideoFrames = 4,
+
+ // The following limits are used by AudioParameters::IsValid().
+ //
+ // A few notes on sample rates of common formats:
+ // - AAC files are limited to 96 kHz.
+ // - MP3 files are limited to 48 kHz.
+ // - Vorbis used to be limited to 96 kHz, but no longer has that
+ // restriction.
+ // - Most PC audio hardware is limited to 192 kHz.
+ kMaxSampleRate = 192000,
+ kMinSampleRate = 3000,
+ kMaxChannels = 32,
+ kMaxBytesPerSample = 4,
+ kMaxBitsPerSample = kMaxBytesPerSample * 8,
+ kMaxSamplesPerPacket = kMaxSampleRate,
+ kMaxPacketSizeInBytes =
+ kMaxBytesPerSample * kMaxChannels * kMaxSamplesPerPacket,
+
+ // This limit is used by ParamTraits<VideoCaptureParams>.
+ kMaxFramesPerSecond = 1000,
+};
+
+} // namespace limits
+
+} // namespace media
+
+#endif // MEDIA_BASE_LIMITS_H_
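
As the header notes, these constants back AudioParameters::IsValid(); the
sketch below shows the kind of range check involved. The function name is
illustrative, not part of the patch:

    #include "media/base/limits.h"

    bool LooksLikeValidAudioConfig(int sample_rate, int channels,
                                   int bits_per_sample) {
      return sample_rate >= media::limits::kMinSampleRate &&
             sample_rate <= media::limits::kMaxSampleRate &&
             channels > 0 && channels <= media::limits::kMaxChannels &&
             bits_per_sample > 0 &&
             bits_per_sample <= media::limits::kMaxBitsPerSample;
    }
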
diff --git a/chromium/media/base/media.cc b/chromium/media/base/media.cc
new file mode 100644
index 00000000000..e1bb4b2c102
--- /dev/null
+++ b/chromium/media/base/media.cc
@@ -0,0 +1,88 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/media.h"
+
+#include "base/files/file_path.h"
+#include "base/lazy_instance.h"
+#include "base/path_service.h"
+#include "base/synchronization/lock.h"
+#include "build/build_config.h"
+#include "media/base/sinc_resampler.h"
+#include "media/base/vector_math.h"
+#include "media/base/yuv_convert.h"
+
+namespace media {
+
+namespace internal {
+// Platform-specific initialization method.
+extern bool InitializeMediaLibraryInternal(const base::FilePath& module_dir);
+} // namespace internal
+
+// Media must only be initialized once, so use a LazyInstance to ensure this.
+class MediaInitializer {
+ public:
+ bool Initialize(const base::FilePath& module_dir) {
+ base::AutoLock auto_lock(lock_);
+ if (!tried_initialize_) {
+ tried_initialize_ = true;
+ initialized_ = internal::InitializeMediaLibraryInternal(module_dir);
+ }
+ return initialized_;
+ }
+
+ bool IsInitialized() {
+ base::AutoLock auto_lock(lock_);
+ return initialized_;
+ }
+
+ private:
+ friend struct base::DefaultLazyInstanceTraits<MediaInitializer>;
+
+ MediaInitializer()
+ : initialized_(false),
+ tried_initialize_(false) {
+ // Perform initialization of libraries which require runtime CPU detection.
+ // TODO(dalecurtis): Add initialization of YUV, SincResampler.
+ vector_math::Initialize();
+#if !defined(OS_IOS)
+ SincResampler::InitializeCPUSpecificFeatures();
+ InitializeCPUSpecificYUVConversions();
+#endif
+ }
+
+ ~MediaInitializer() {
+ NOTREACHED() << "MediaInitializer should be leaky!";
+ }
+
+ base::Lock lock_;
+ bool initialized_;
+ bool tried_initialize_;
+
+ DISALLOW_COPY_AND_ASSIGN(MediaInitializer);
+};
+
+static base::LazyInstance<MediaInitializer>::Leaky g_media_library =
+ LAZY_INSTANCE_INITIALIZER;
+
+bool InitializeMediaLibrary(const base::FilePath& module_dir) {
+ return g_media_library.Get().Initialize(module_dir);
+}
+
+void InitializeMediaLibraryForTesting() {
+ base::FilePath module_dir;
+ CHECK(PathService::Get(base::DIR_EXE, &module_dir));
+ CHECK(g_media_library.Get().Initialize(module_dir));
+}
+
+bool IsMediaLibraryInitialized() {
+ return g_media_library.Get().IsInitialized();
+}
+
+void InitializeCPUSpecificMediaFeatures() {
+ // Force initialization of the media initializer, but don't call Initialize().
+ g_media_library.Get();
+}
+
+} // namespace media
diff --git a/chromium/media/base/media.h b/chromium/media/base/media.h
new file mode 100644
index 00000000000..c86e5683fac
--- /dev/null
+++ b/chromium/media/base/media.h
@@ -0,0 +1,48 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Contains code for initializing and querying the state of the media library
+// as a whole.
+
+#ifndef MEDIA_BASE_MEDIA_H_
+#define MEDIA_BASE_MEDIA_H_
+
+#include "media/base/media_export.h"
+
+namespace base {
+class FilePath;
+}
+
+namespace media {
+
+// Attempts to initialize the media library (loading DLLs, DSOs, etc.).
+//
+// If |module_dir| is the empty string, then the system default library paths
+// are searched for the dynamic libraries. If a |module_dir| is provided, then
+// only the specified |module_dir| will be searched for the dynamic libraries.
+//
+// If multiple initializations are attempted with different |module_dir|s
+// specified then the first one to succeed remains effective for the lifetime
+// of the process.
+//
+// Returns true if everything was successfully initialized, false otherwise.
+MEDIA_EXPORT bool InitializeMediaLibrary(const base::FilePath& module_dir);
+
+// Helper function for unit tests to avoid boilerplate code everywhere. This
+// function will crash if it fails to load the media library. This ensures tests
+// fail if the media library is not available.
+MEDIA_EXPORT void InitializeMediaLibraryForTesting();
+
+// Use this if you need to check whether the media library is initialized
+// for this process, without actually trying to initialize it.
+MEDIA_EXPORT bool IsMediaLibraryInitialized();
+
+// Use this if you need to initialize CPU specific features WITHOUT loading
+// DLLs, DSOs, etc. Only necessary if InitializeMediaLibrary() is not called;
+// does nothing if the media library has already been initialized.
+MEDIA_EXPORT void InitializeCPUSpecificMediaFeatures();
+
+} // namespace media
+
+#endif // MEDIA_BASE_MEDIA_H_
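
A minimal usage sketch, mirroring what InitializeMediaLibraryForTesting() does
in media.cc; the chosen directory and the error handling are illustrative:

    #include "base/files/file_path.h"
    #include "base/path_service.h"
    #include "media/base/media.h"

    bool InitMedia() {
      base::FilePath module_dir;
      if (!PathService::Get(base::DIR_EXE, &module_dir))
        return false;
      // On failure, IsMediaLibraryInitialized() keeps returning false and
      // media playback stays unavailable.
      return media::InitializeMediaLibrary(module_dir);
    }
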
diff --git a/chromium/media/base/media_export.h b/chromium/media/base/media_export.h
new file mode 100644
index 00000000000..48f6f45e59a
--- /dev/null
+++ b/chromium/media/base/media_export.h
@@ -0,0 +1,32 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_MEDIA_EXPORT_H_
+#define MEDIA_BASE_MEDIA_EXPORT_H_
+
+// Define MEDIA_EXPORT so that functionality implemented by the Media module
+// can be exported to consumers.
+
+#if defined(COMPONENT_BUILD)
+#if defined(WIN32)
+
+#if defined(MEDIA_IMPLEMENTATION)
+#define MEDIA_EXPORT __declspec(dllexport)
+#else
+#define MEDIA_EXPORT __declspec(dllimport)
+#endif // defined(MEDIA_IMPLEMENTATION)
+
+#else // defined(WIN32)
+#if defined(MEDIA_IMPLEMENTATION)
+#define MEDIA_EXPORT __attribute__((visibility("default")))
+#else
+#define MEDIA_EXPORT
+#endif
+#endif
+
+#else // defined(COMPONENT_BUILD)
+#define MEDIA_EXPORT
+#endif
+
+#endif // MEDIA_BASE_MEDIA_EXPORT_H_
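
For reference, a sketch of how the macro is consumed: exported symbols are
annotated at their declarations, and MEDIA_IMPLEMENTATION is defined only while
building the media target itself. The class and function names below are
illustrative:

    #include "media/base/media_export.h"

    class MEDIA_EXPORT FrameCounter {  // dllexport when building media,
     public:                           // dllimport (or default) elsewhere.
      int Count() const;
    };

    MEDIA_EXPORT bool IsFrameCountingEnabled();
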
diff --git a/chromium/media/base/media_file_checker.cc b/chromium/media/base/media_file_checker.cc
new file mode 100644
index 00000000000..d4708e506c7
--- /dev/null
+++ b/chromium/media/base/media_file_checker.cc
@@ -0,0 +1,110 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/media_file_checker.h"
+
+#include <map>
+
+#include "base/bind.h"
+#include "base/time/time.h"
+#include "media/ffmpeg/ffmpeg_common.h"
+#include "media/filters/blocking_url_protocol.h"
+#include "media/filters/ffmpeg_glue.h"
+#include "media/filters/file_data_source.h"
+
+namespace media {
+
+static const int64 kMaxCheckTimeInSeconds = 5;
+
+static void OnError(bool* called) {
+ *called = false;
+}
+
+MediaFileChecker::MediaFileChecker(const base::PlatformFile& file)
+ : file_(file),
+ file_closer_(&file_) {
+}
+
+MediaFileChecker::~MediaFileChecker() {
+}
+
+bool MediaFileChecker::Start(base::TimeDelta check_time) {
+ media::FileDataSource source;
+ bool read_ok = true;
+ media::BlockingUrlProtocol protocol(&source, base::Bind(&OnError, &read_ok));
+ media::FFmpegGlue glue(&protocol);
+ source.InitializeFromPlatformFile(file_);
+ AVFormatContext* format_context = glue.format_context();
+
+ if (!glue.OpenContext())
+ return false;
+
+ if (avformat_find_stream_info(format_context, NULL) < 0)
+ return false;
+
+ // Remember the codec context for any decodable audio or video streams.
+ std::map<int, AVCodecContext*> stream_contexts;
+ for (size_t i = 0; i < format_context->nb_streams; ++i) {
+ AVCodecContext* c = format_context->streams[i]->codec;
+ if (c->codec_type == AVMEDIA_TYPE_AUDIO ||
+ c->codec_type == AVMEDIA_TYPE_VIDEO) {
+ AVCodec* codec = avcodec_find_decoder(c->codec_id);
+ if (codec && avcodec_open2(c, codec, NULL) >= 0)
+ stream_contexts[i] = c;
+ }
+ }
+
+ if (stream_contexts.size() == 0)
+ return false;
+
+ AVPacket packet;
+ scoped_ptr_malloc<AVFrame, media::ScopedPtrAVFree> frame(
+ avcodec_alloc_frame());
+ int result = 0;
+
+ base::Time deadline = base::Time::Now() +
+ std::min(check_time,
+ base::TimeDelta::FromSeconds(kMaxCheckTimeInSeconds));
+ do {
+ result = av_read_frame(glue.format_context(), &packet);
+ if (result < 0)
+ break;
+ result = av_dup_packet(&packet);
+ if (result < 0)
+ break;
+
+ std::map<int, AVCodecContext*>::const_iterator it =
+ stream_contexts.find(packet.stream_index);
+ if (it == stream_contexts.end()) {
+ av_free_packet(&packet);
+ continue;
+ }
+ AVCodecContext* av_context = it->second;
+
+ int frame_decoded = 0;
+ if (av_context->codec_type == AVMEDIA_TYPE_AUDIO) {
+ // Make a shallow copy of |packet| so we can slide packet.data forward as
+ // frames are decoded; otherwise av_free_packet() would corrupt memory.
+ AVPacket temp_packet = packet;
+ do {
+ avcodec_get_frame_defaults(frame.get());
+ result = avcodec_decode_audio4(av_context, frame.get(), &frame_decoded,
+ &temp_packet);
+ if (result < 0)
+ break;
+ temp_packet.size -= result;
+ temp_packet.data += result;
+ } while (temp_packet.size > 0);
+ } else if (av_context->codec_type == AVMEDIA_TYPE_VIDEO) {
+ avcodec_get_frame_defaults(frame.get());
+ result = avcodec_decode_video2(av_context, frame.get(), &frame_decoded,
+ &packet);
+ }
+ av_free_packet(&packet);
+ } while (base::Time::Now() < deadline && read_ok && result >= 0);
+
+ return read_ok && (result == AVERROR_EOF || result >= 0);
+}
+
+} // namespace media
diff --git a/chromium/media/base/media_file_checker.h b/chromium/media/base/media_file_checker.h
new file mode 100644
index 00000000000..6e8fc9f285f
--- /dev/null
+++ b/chromium/media/base/media_file_checker.h
@@ -0,0 +1,41 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_MEDIA_FILE_CHECKER_H_
+#define MEDIA_BASE_MEDIA_FILE_CHECKER_H_
+
+#include "base/basictypes.h"
+#include "base/files/scoped_platform_file_closer.h"
+#include "base/platform_file.h"
+#include "media/base/media_export.h"
+
+namespace base {
+class TimeDelta;
+}
+
+namespace media {
+
+// This class tries to determine if a file is a valid media file. The entire
+// file is not decoded so a positive result from this class does not make the
+// file safe to use in the browser process.
+class MEDIA_EXPORT MediaFileChecker {
+ public:
+ explicit MediaFileChecker(const base::PlatformFile& file);
+ ~MediaFileChecker();
+
+ // After opening |file|, up to |check_time| amount of wall-clock time is spent
+ // decoding the file. The amount of audio/video data decoded will depend on
+ // the bitrate of the file and the speed of the CPU.
+ bool Start(base::TimeDelta check_time);
+
+ private:
+ base::PlatformFile file_;
+ base::ScopedPlatformFileCloser file_closer_;
+
+ DISALLOW_COPY_AND_ASSIGN(MediaFileChecker);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_MEDIA_FILE_CHECKER_H_
diff --git a/chromium/media/base/media_file_checker_unittest.cc b/chromium/media/base/media_file_checker_unittest.cc
new file mode 100644
index 00000000000..f43c846776c
--- /dev/null
+++ b/chromium/media/base/media_file_checker_unittest.cc
@@ -0,0 +1,48 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/logging.h"
+#include "build/build_config.h"
+#include "media/base/media_file_checker.h"
+#include "media/base/test_data_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+static void RunMediaFileChecker(const std::string& filename, bool expectation) {
+ base::PlatformFileError error;
+ base::PlatformFile file = base::CreatePlatformFile(
+ GetTestDataFilePath(filename),
+ base::PLATFORM_FILE_OPEN | base::PLATFORM_FILE_READ,
+ NULL,
+ &error);
+ ASSERT_EQ(base::PLATFORM_FILE_OK, error);
+
+ MediaFileChecker checker(file);
+ const base::TimeDelta check_time = base::TimeDelta::FromMilliseconds(100);
+ bool result = checker.Start(check_time);
+ EXPECT_EQ(expectation, result);
+
+ base::ClosePlatformFile(file);
+}
+
+TEST(MediaFileCheckerTest, InvalidFile) {
+ RunMediaFileChecker("ten_byte_file", false);
+}
+
+TEST(MediaFileCheckerTest, Video) {
+ RunMediaFileChecker("bear.ogv", true);
+}
+
+TEST(MediaFileCheckerTest, Audio) {
+ RunMediaFileChecker("sfx.ogg", true);
+}
+
+#if defined(GOOGLE_CHROME_BUILD) || defined(USE_PROPRIETARY_CODECS)
+TEST(MediaFileCheckerTest, MP3) {
+ RunMediaFileChecker("sfx.mp3", true);
+}
+#endif
+
+} // namespace media
diff --git a/chromium/media/base/media_keys.cc b/chromium/media/base/media_keys.cc
new file mode 100644
index 00000000000..fd01de12f90
--- /dev/null
+++ b/chromium/media/base/media_keys.cc
@@ -0,0 +1,15 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/media_keys.h"
+
+namespace media {
+
+MediaKeys::MediaKeys() {}
+
+MediaKeys::~MediaKeys() {}
+
+Decryptor* MediaKeys::GetDecryptor() { return NULL; }
+
+} // namespace media
diff --git a/chromium/media/base/media_keys.h b/chromium/media/base/media_keys.h
new file mode 100644
index 00000000000..482248394fd
--- /dev/null
+++ b/chromium/media/base/media_keys.h
@@ -0,0 +1,90 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_MEDIA_KEYS_H_
+#define MEDIA_BASE_MEDIA_KEYS_H_
+
+#include <string>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/callback.h"
+#include "base/memory/scoped_ptr.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+class Decryptor;
+
+// Performs media key operations.
+//
+// All key operations are called on the renderer thread. Therefore, these calls
+// should be fast and nonblocking; key events should be fired asynchronously.
+class MEDIA_EXPORT MediaKeys {
+ public:
+ // Reported to UMA, so never reuse a value!
+ // Must be kept in sync with WebKit::WebMediaPlayerClient::MediaKeyErrorCode
+ // (enforced in webmediaplayer_impl.cc).
+ enum KeyError {
+ kUnknownError = 1,
+ kClientError,
+ // The following v0.1b values have never been used.
+ // kServiceError,
+ // kOutputError,
+ // kHardwareChangeError,
+ // kDomainError,
+ kMaxKeyError // Must be last and greater than any legit value.
+ };
+
+ MediaKeys();
+ virtual ~MediaKeys();
+
+ // Generates a key request with the |type| and |init_data| provided.
+ // Returns true if generating key request succeeded, false otherwise.
+ // Note: AddKey() and CancelKeyRequest() should only be called after
+ // GenerateKeyRequest() returns true.
+ virtual bool GenerateKeyRequest(const std::string& type,
+ const uint8* init_data,
+ int init_data_length) = 0;
+
+ // Adds a |key| to the session. The |key| is not limited to a decryption
+ // key. It can be any data that the key system accepts, such as a license.
+ // If multiple calls of this function set different keys for the same
+ // key ID, the older key will be replaced by the newer key.
+ virtual void AddKey(const uint8* key, int key_length,
+ const uint8* init_data, int init_data_length,
+ const std::string& session_id) = 0;
+
+ // Cancels the key request specified by |session_id|.
+ virtual void CancelKeyRequest(const std::string& session_id) = 0;
+
+ // Gets the Decryptor object associated with the MediaKeys. Returns NULL if
+ // no Decryptor object is associated. The returned object is only guaranteed
+ // to be valid during the MediaKeys' lifetime.
+ virtual Decryptor* GetDecryptor();
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MediaKeys);
+};
+
+// Key event callbacks. See the spec for details:
+// http://dvcs.w3.org/hg/html-media/raw-file/eme-v0.1b/encrypted-media/encrypted-media.html#event-summary
+typedef base::Callback<void(const std::string& session_id)> KeyAddedCB;
+
+typedef base::Callback<void(const std::string& session_id,
+ media::MediaKeys::KeyError error_code,
+ int system_code)> KeyErrorCB;
+
+typedef base::Callback<void(const std::string& session_id,
+ const std::vector<uint8>& message,
+ const std::string& default_url)> KeyMessageCB;
+
+typedef base::Callback<void(const std::string& session_id,
+ const std::string& type,
+ scoped_ptr<uint8[]> init_data,
+ int init_data_size)> NeedKeyCB;
+
+} // namespace media
+
+#endif // MEDIA_BASE_MEDIA_KEYS_H_
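
A hedged sketch of the call sequence the interface comments describe; the MIME
type, buffers, and session id here are placeholders:

    #include "media/base/media_keys.h"

    void RunKeyExchange(media::MediaKeys* media_keys,
                        const uint8* init_data, int init_data_length,
                        const uint8* license, int license_length,
                        const std::string& session_id) {
      // AddKey() and CancelKeyRequest() are only valid after a successful
      // GenerateKeyRequest().
      if (!media_keys->GenerateKeyRequest("video/webm",
                                          init_data, init_data_length))
        return;

      // The "key" can be any data the key system accepts, e.g. a license blob.
      media_keys->AddKey(license, license_length,
                         init_data, init_data_length, session_id);

      // Later, when the session is no longer needed:
      media_keys->CancelKeyRequest(session_id);
    }
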
diff --git a/chromium/media/base/media_log.cc b/chromium/media/base/media_log.cc
new file mode 100644
index 00000000000..8a07b020c7d
--- /dev/null
+++ b/chromium/media/base/media_log.cc
@@ -0,0 +1,231 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/media_log.h"
+
+#include <string>
+
+#include "base/atomic_sequence_num.h"
+#include "base/logging.h"
+#include "base/values.h"
+
+namespace media {
+
+// A count of all MediaLogs created in the current process. Used to generate
+// unique IDs.
+static base::StaticAtomicSequenceNumber g_media_log_count;
+
+const char* MediaLog::EventTypeToString(MediaLogEvent::Type type) {
+ switch (type) {
+ case MediaLogEvent::WEBMEDIAPLAYER_CREATED:
+ return "WEBMEDIAPLAYER_CREATED";
+ case MediaLogEvent::WEBMEDIAPLAYER_DESTROYED:
+ return "WEBMEDIAPLAYER_DESTROYED";
+ case MediaLogEvent::PIPELINE_CREATED:
+ return "PIPELINE_CREATED";
+ case MediaLogEvent::PIPELINE_DESTROYED:
+ return "PIPELINE_DESTROYED";
+ case MediaLogEvent::LOAD:
+ return "LOAD";
+ case MediaLogEvent::SEEK:
+ return "SEEK";
+ case MediaLogEvent::PLAY:
+ return "PLAY";
+ case MediaLogEvent::PAUSE:
+ return "PAUSE";
+ case MediaLogEvent::PIPELINE_STATE_CHANGED:
+ return "PIPELINE_STATE_CHANGED";
+ case MediaLogEvent::PIPELINE_ERROR:
+ return "PIPELINE_ERROR";
+ case MediaLogEvent::VIDEO_SIZE_SET:
+ return "VIDEO_SIZE_SET";
+ case MediaLogEvent::DURATION_SET:
+ return "DURATION_SET";
+ case MediaLogEvent::TOTAL_BYTES_SET:
+ return "TOTAL_BYTES_SET";
+ case MediaLogEvent::NETWORK_ACTIVITY_SET:
+ return "NETWORK_ACTIVITY_SET";
+ case MediaLogEvent::AUDIO_ENDED:
+ return "AUDIO_ENDED";
+ case MediaLogEvent::VIDEO_ENDED:
+ return "VIDEO_ENDED";
+ case MediaLogEvent::AUDIO_RENDERER_DISABLED:
+ return "AUDIO_RENDERER_DISABLED";
+ case MediaLogEvent::BUFFERED_EXTENTS_CHANGED:
+ return "BUFFERED_EXTENTS_CHANGED";
+ case MediaLogEvent::MEDIA_SOURCE_ERROR:
+ return "MEDIA_SOURCE_ERROR";
+ case MediaLogEvent::PROPERTY_CHANGE:
+ return "PROPERTY_CHANGE";
+ }
+ NOTREACHED();
+ return NULL;
+}
+
+const char* MediaLog::PipelineStatusToString(PipelineStatus status) {
+ switch (status) {
+ case PIPELINE_OK:
+ return "pipeline: ok";
+ case PIPELINE_ERROR_URL_NOT_FOUND:
+ return "pipeline: url not found";
+ case PIPELINE_ERROR_NETWORK:
+ return "pipeline: network error";
+ case PIPELINE_ERROR_DECODE:
+ return "pipeline: decode error";
+ case PIPELINE_ERROR_DECRYPT:
+ return "pipeline: decrypt error";
+ case PIPELINE_ERROR_ABORT:
+ return "pipeline: abort";
+ case PIPELINE_ERROR_INITIALIZATION_FAILED:
+ return "pipeline: initialization failed";
+ case PIPELINE_ERROR_COULD_NOT_RENDER:
+ return "pipeline: could not render";
+ case PIPELINE_ERROR_READ:
+ return "pipeline: read error";
+ case PIPELINE_ERROR_OPERATION_PENDING:
+ return "pipeline: operation pending";
+ case PIPELINE_ERROR_INVALID_STATE:
+ return "pipeline: invalid state";
+ case DEMUXER_ERROR_COULD_NOT_OPEN:
+ return "demuxer: could not open";
+ case DEMUXER_ERROR_COULD_NOT_PARSE:
+ return "dumuxer: could not parse";
+ case DEMUXER_ERROR_NO_SUPPORTED_STREAMS:
+ return "demuxer: no supported streams";
+ case DECODER_ERROR_NOT_SUPPORTED:
+ return "decoder: not supported";
+ case PIPELINE_STATUS_MAX:
+ NOTREACHED();
+ }
+ NOTREACHED();
+ return NULL;
+}
+
+LogHelper::LogHelper(const LogCB& log_cb) : log_cb_(log_cb) {}
+
+LogHelper::~LogHelper() {
+ if (log_cb_.is_null())
+ return;
+ log_cb_.Run(stream_.str());
+}
+
+MediaLog::MediaLog() : id_(g_media_log_count.GetNext()) {}
+
+MediaLog::~MediaLog() {}
+
+void MediaLog::AddEvent(scoped_ptr<MediaLogEvent> event) {}
+
+scoped_ptr<MediaLogEvent> MediaLog::CreateEvent(MediaLogEvent::Type type) {
+ scoped_ptr<MediaLogEvent> event(new MediaLogEvent);
+ event->id = id_;
+ event->type = type;
+ event->time = base::TimeTicks::Now();
+ return event.Pass();
+}
+
+scoped_ptr<MediaLogEvent> MediaLog::CreateBooleanEvent(
+ MediaLogEvent::Type type, const char* property, bool value) {
+ scoped_ptr<MediaLogEvent> event(CreateEvent(type));
+ event->params.SetBoolean(property, value);
+ return event.Pass();
+}
+
+scoped_ptr<MediaLogEvent> MediaLog::CreateStringEvent(
+ MediaLogEvent::Type type, const char* property, const std::string& value) {
+ scoped_ptr<MediaLogEvent> event(CreateEvent(type));
+ event->params.SetString(property, value);
+ return event.Pass();
+}
+
+scoped_ptr<MediaLogEvent> MediaLog::CreateTimeEvent(
+ MediaLogEvent::Type type, const char* property, base::TimeDelta value) {
+ scoped_ptr<MediaLogEvent> event(CreateEvent(type));
+ event->params.SetDouble(property, value.InSecondsF());
+ return event.Pass();
+}
+
+scoped_ptr<MediaLogEvent> MediaLog::CreateLoadEvent(const std::string& url) {
+ scoped_ptr<MediaLogEvent> event(CreateEvent(MediaLogEvent::LOAD));
+ event->params.SetString("url", url);
+ return event.Pass();
+}
+
+scoped_ptr<MediaLogEvent> MediaLog::CreateSeekEvent(float seconds) {
+ scoped_ptr<MediaLogEvent> event(CreateEvent(MediaLogEvent::SEEK));
+ event->params.SetDouble("seek_target", seconds);
+ return event.Pass();
+}
+
+scoped_ptr<MediaLogEvent> MediaLog::CreatePipelineStateChangedEvent(
+ Pipeline::State state) {
+ scoped_ptr<MediaLogEvent> event(
+ CreateEvent(MediaLogEvent::PIPELINE_STATE_CHANGED));
+ event->params.SetString("pipeline_state", Pipeline::GetStateString(state));
+ return event.Pass();
+}
+
+scoped_ptr<MediaLogEvent> MediaLog::CreatePipelineErrorEvent(
+ PipelineStatus error) {
+ scoped_ptr<MediaLogEvent> event(CreateEvent(MediaLogEvent::PIPELINE_ERROR));
+ event->params.SetString("pipeline_error", PipelineStatusToString(error));
+ return event.Pass();
+}
+
+scoped_ptr<MediaLogEvent> MediaLog::CreateVideoSizeSetEvent(
+ size_t width, size_t height) {
+ scoped_ptr<MediaLogEvent> event(CreateEvent(MediaLogEvent::VIDEO_SIZE_SET));
+ event->params.SetInteger("width", width);
+ event->params.SetInteger("height", height);
+ return event.Pass();
+}
+
+scoped_ptr<MediaLogEvent> MediaLog::CreateBufferedExtentsChangedEvent(
+ int64 start, int64 current, int64 end) {
+ scoped_ptr<MediaLogEvent> event(
+ CreateEvent(MediaLogEvent::BUFFERED_EXTENTS_CHANGED));
+ // These values are headed to JS where there is no int64 so we use a double
+ // and accept loss of precision above 2^53 bytes (8 Exabytes).
+ event->params.SetDouble("buffer_start", start);
+ event->params.SetDouble("buffer_current", current);
+ event->params.SetDouble("buffer_end", end);
+ return event.Pass();
+}
+
+scoped_ptr<MediaLogEvent> MediaLog::CreateMediaSourceErrorEvent(
+ const std::string& error) {
+ scoped_ptr<MediaLogEvent> event(
+ CreateEvent(MediaLogEvent::MEDIA_SOURCE_ERROR));
+ event->params.SetString("error", error);
+ return event.Pass();
+}
+
+void MediaLog::SetStringProperty(
+ const char* key, const std::string& value) {
+ scoped_ptr<MediaLogEvent> event(CreateEvent(MediaLogEvent::PROPERTY_CHANGE));
+ event->params.SetString(key, value);
+ AddEvent(event.Pass());
+}
+
+void MediaLog::SetIntegerProperty(
+ const char* key, int value) {
+ scoped_ptr<MediaLogEvent> event(CreateEvent(MediaLogEvent::PROPERTY_CHANGE));
+ event->params.SetInteger(key, value);
+ AddEvent(event.Pass());
+}
+
+void MediaLog::SetDoubleProperty(
+ const char* key, double value) {
+ scoped_ptr<MediaLogEvent> event(CreateEvent(MediaLogEvent::PROPERTY_CHANGE));
+ event->params.SetDouble(key, value);
+ AddEvent(event.Pass());
+}
+
+void MediaLog::SetBooleanProperty(
+ const char* key, bool value) {
+ scoped_ptr<MediaLogEvent> event(CreateEvent(MediaLogEvent::PROPERTY_CHANGE));
+ event->params.SetBoolean(key, value);
+ AddEvent(event.Pass());
+}
+
+} // namespace media
diff --git a/chromium/media/base/media_log.h b/chromium/media/base/media_log.h
new file mode 100644
index 00000000000..1d25c0973a5
--- /dev/null
+++ b/chromium/media/base/media_log.h
@@ -0,0 +1,90 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_MEDIA_LOG_H_
+#define MEDIA_BASE_MEDIA_LOG_H_
+
+#include <sstream>
+#include <string>
+
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "media/base/media_export.h"
+#include "media/base/media_log_event.h"
+#include "media/base/pipeline.h"
+#include "media/base/pipeline_status.h"
+
+namespace media {
+
+// Indicates a string should be added to the log.
+// First parameter - The string to add to the log.
+typedef base::Callback<void(const std::string&)> LogCB;
+
+// Helper class to make it easier to use log_cb like DVLOG().
+class LogHelper {
+ public:
+ LogHelper(const LogCB& log_cb);
+ ~LogHelper();
+
+ std::ostream& stream() { return stream_; }
+
+ private:
+ LogCB log_cb_;
+ std::stringstream stream_;
+};
+
+#define MEDIA_LOG(log_cb) LogHelper(log_cb).stream()
+
+class MEDIA_EXPORT MediaLog : public base::RefCountedThreadSafe<MediaLog> {
+ public:
+ // Convert various enums to strings.
+ static const char* EventTypeToString(MediaLogEvent::Type type);
+ static const char* PipelineStatusToString(PipelineStatus);
+
+ MediaLog();
+
+ // Add an event to this log. Overridden by subclasses to actually do something
+ // with it.
+ virtual void AddEvent(scoped_ptr<MediaLogEvent> event);
+
+ // Helper methods to create events and their parameters.
+ scoped_ptr<MediaLogEvent> CreateEvent(MediaLogEvent::Type type);
+ scoped_ptr<MediaLogEvent> CreateBooleanEvent(
+ MediaLogEvent::Type type, const char* property, bool value);
+ scoped_ptr<MediaLogEvent> CreateStringEvent(
+ MediaLogEvent::Type type, const char* property, const std::string& value);
+ scoped_ptr<MediaLogEvent> CreateTimeEvent(
+ MediaLogEvent::Type type, const char* property, base::TimeDelta value);
+ scoped_ptr<MediaLogEvent> CreateLoadEvent(const std::string& url);
+ scoped_ptr<MediaLogEvent> CreateSeekEvent(float seconds);
+ scoped_ptr<MediaLogEvent> CreatePipelineStateChangedEvent(
+ Pipeline::State state);
+ scoped_ptr<MediaLogEvent> CreatePipelineErrorEvent(PipelineStatus error);
+ scoped_ptr<MediaLogEvent> CreateVideoSizeSetEvent(
+ size_t width, size_t height);
+ scoped_ptr<MediaLogEvent> CreateBufferedExtentsChangedEvent(
+ int64 start, int64 current, int64 end);
+ scoped_ptr<MediaLogEvent> CreateMediaSourceErrorEvent(
+ const std::string& error);
+
+ // Report a property change without an accompanying event.
+ void SetStringProperty(const char* key, const std::string& value);
+ void SetIntegerProperty(const char* key, int value);
+ void SetDoubleProperty(const char* key, double value);
+ void SetBooleanProperty(const char* key, bool value);
+
+ protected:
+ friend class base::RefCountedThreadSafe<MediaLog>;
+ virtual ~MediaLog();
+
+ private:
+ // A unique (to this process) id for this MediaLog.
+ int32 id_;
+
+ DISALLOW_COPY_AND_ASSIGN(MediaLog);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_MEDIA_LOG_H_
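
A minimal sketch of the MEDIA_LOG helper in use; |log_cb| would normally come
from the pipeline, and the function here is illustrative:

    #include "media/base/media_log.h"

    void ReportParseFailure(const media::LogCB& log_cb, int segment) {
      // The LogHelper temporary buffers the stream and runs |log_cb| with the
      // assembled string when it is destroyed at the end of the statement.
      MEDIA_LOG(log_cb) << "Parsing segment " << segment << " failed.";
    }
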
diff --git a/chromium/media/base/media_log_event.h b/chromium/media/base/media_log_event.h
new file mode 100644
index 00000000000..811d1131a7a
--- /dev/null
+++ b/chromium/media/base/media_log_event.h
@@ -0,0 +1,103 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_MEDIA_LOG_EVENT_H_
+#define MEDIA_BASE_MEDIA_LOG_EVENT_H_
+
+#include "base/time/time.h"
+#include "base/values.h"
+
+namespace media {
+
+struct MediaLogEvent {
+ MediaLogEvent() {}
+
+ MediaLogEvent(const MediaLogEvent& event) {
+ *this = event;
+ }
+
+ MediaLogEvent& operator=(const MediaLogEvent& event) {
+ id = event.id;
+ type = event.type;
+ scoped_ptr<base::DictionaryValue> event_copy(event.params.DeepCopy());
+ params.Swap(event_copy.get());
+ time = event.time;
+ return *this;
+ }
+
+ enum Type {
+ // A WebMediaPlayer is being created or destroyed.
+ // params: none.
+ WEBMEDIAPLAYER_CREATED,
+ WEBMEDIAPLAYER_DESTROYED,
+
+ // A Pipeline is being created or destroyed.
+ // params: none.
+ PIPELINE_CREATED,
+ PIPELINE_DESTROYED,
+
+ // A media player is loading a resource.
+ // params: "url": <URL of the resource>.
+ LOAD,
+
+ // A media player has started seeking.
+ // params: "seek_target": <number of seconds to which to seek>.
+ SEEK,
+
+ // A media player has been told to play or pause.
+ // params: none.
+ PLAY,
+ PAUSE,
+
+ // The state of Pipeline has changed.
+ // params: "pipeline_state": <string name of the state>.
+ PIPELINE_STATE_CHANGED,
+
+ // An error has occurred in the pipeline.
+ // params: "pipeline_error": <string name of the error>.
+ PIPELINE_ERROR,
+
+ // The size of the video has been determined.
+ // params: "width": <integral width of the video>.
+ // "height": <integral height of the video>.
+ VIDEO_SIZE_SET,
+
+ // A property of the pipeline has been set by a filter.
+ // These take a single parameter, named after the event and of the
+ // appropriate type, e.g. DURATION_SET: "duration" of type TimeDelta.
+ DURATION_SET,
+ TOTAL_BYTES_SET,
+ NETWORK_ACTIVITY_SET,
+
+ // Audio/Video stream playback has ended.
+ AUDIO_ENDED,
+ VIDEO_ENDED,
+
+ // The audio renderer has been disabled.
+ // params: none.
+ AUDIO_RENDERER_DISABLED,
+
+ // The extents of the sliding buffer have changed.
+ // params: "buffer_start": <first buffered byte>.
+ // "buffer_current": <current offset>.
+ // "buffer_end": <last buffered byte>.
+ BUFFERED_EXTENTS_CHANGED,
+
+ // Errors reported by Media Source Extensions code.
+ // params: "error": Error string describing the error detected.
+ MEDIA_SOURCE_ERROR,
+
+ // A property has changed without any special event occurring.
+ PROPERTY_CHANGE,
+ };
+
+ int32 id;
+ Type type;
+ base::DictionaryValue params;
+ base::TimeTicks time;
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_MEDIA_LOG_EVENT_H_
diff --git a/chromium/media/base/media_posix.cc b/chromium/media/base/media_posix.cc
new file mode 100644
index 00000000000..4aabae9106e
--- /dev/null
+++ b/chromium/media/base/media_posix.cc
@@ -0,0 +1,67 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/media.h"
+
+#include <string>
+
+#include "base/files/file_path.h"
+#include "base/logging.h"
+#include "base/path_service.h"
+#include "base/strings/stringize_macros.h"
+#include "media/ffmpeg/ffmpeg_common.h"
+#include "third_party/ffmpeg/ffmpeg_stubs.h"
+
+using third_party_ffmpeg::kNumStubModules;
+using third_party_ffmpeg::kModuleFfmpegsumo;
+using third_party_ffmpeg::InitializeStubs;
+using third_party_ffmpeg::StubPathMap;
+
+namespace media {
+namespace internal {
+
+// Handy to prevent shooting ourselves in the foot with macro wizardry.
+#if !defined(LIBAVCODEC_VERSION_MAJOR) || \
+ !defined(LIBAVFORMAT_VERSION_MAJOR) || \
+ !defined(LIBAVUTIL_VERSION_MAJOR)
+#error FFmpeg headers not included!
+#endif
+
+#define AVCODEC_VERSION STRINGIZE(LIBAVCODEC_VERSION_MAJOR)
+#define AVFORMAT_VERSION STRINGIZE(LIBAVFORMAT_VERSION_MAJOR)
+#define AVUTIL_VERSION STRINGIZE(LIBAVUTIL_VERSION_MAJOR)
+
+#if defined(OS_MACOSX)
+// TODO(evan): should be using .so like ffmpegsumo here.
+#define DSO_NAME(MODULE, VERSION) ("lib" MODULE "." VERSION ".dylib")
+static const base::FilePath::CharType kSumoLib[] =
+ FILE_PATH_LITERAL("ffmpegsumo.so");
+#elif defined(OS_POSIX)
+#define DSO_NAME(MODULE, VERSION) ("lib" MODULE ".so." VERSION)
+static const base::FilePath::CharType kSumoLib[] =
+ FILE_PATH_LITERAL("libffmpegsumo.so");
+#else
+#error "Do not know how to construct DSO name for this OS."
+#endif
+
+bool InitializeMediaLibraryInternal(const base::FilePath& module_dir) {
+ StubPathMap paths;
+
+ // First try to initialize with Chrome's sumo library.
+ DCHECK_EQ(kNumStubModules, 1);
+ paths[kModuleFfmpegsumo].push_back(module_dir.Append(kSumoLib).value());
+
+ // If that fails, see if any system libraries are available.
+ paths[kModuleFfmpegsumo].push_back(module_dir.Append(
+ FILE_PATH_LITERAL(DSO_NAME("avutil", AVUTIL_VERSION))).value());
+ paths[kModuleFfmpegsumo].push_back(module_dir.Append(
+ FILE_PATH_LITERAL(DSO_NAME("avcodec", AVCODEC_VERSION))).value());
+ paths[kModuleFfmpegsumo].push_back(module_dir.Append(
+ FILE_PATH_LITERAL(DSO_NAME("avformat", AVFORMAT_VERSION))).value());
+
+ return InitializeStubs(paths);
+}
+
+} // namespace internal
+} // namespace media
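
For reference, how DSO_NAME expands on each platform (the version number is
illustrative):

    // Linux/POSIX:  DSO_NAME("avcodec", "54")  ->  "libavcodec.so.54"
    // Mac OS X:     DSO_NAME("avcodec", "54")  ->  "libavcodec.54.dylib"
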
diff --git a/chromium/media/base/media_stub.cc b/chromium/media/base/media_stub.cc
new file mode 100644
index 00000000000..9efb37e2a0f
--- /dev/null
+++ b/chromium/media/base/media_stub.cc
@@ -0,0 +1,19 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/media.h"
+
+#include "base/files/file_path.h"
+
+// This file is intended for platforms that don't need to load any media
+// libraries (e.g., iOS).
+namespace media {
+namespace internal {
+
+bool InitializeMediaLibraryInternal(const base::FilePath& module_dir) {
+ return true;
+}
+
+} // namespace internal
+} // namespace media
diff --git a/chromium/media/base/media_switches.cc b/chromium/media/base/media_switches.cc
new file mode 100644
index 00000000000..2ebf5dfc7bf
--- /dev/null
+++ b/chromium/media/base/media_switches.cc
@@ -0,0 +1,85 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/media_switches.h"
+
+namespace switches {
+
+// Allow users to specify a custom buffer size for debugging purposes.
+const char kAudioBufferSize[] = "audio-buffer-size";
+
+// Enable EAC3 playback in MSE.
+const char kEnableEac3Playback[] = "enable-eac3-playback";
+
+// Enables Opus playback in media elements.
+const char kEnableOpusPlayback[] = "enable-opus-playback";
+
+// Enables VP8 Alpha playback in media elements.
+const char kEnableVp8AlphaPlayback[] = "enable-vp8-alpha-playback";
+
+// Set number of threads to use for video decoding.
+const char kVideoThreads[] = "video-threads";
+
+// Override suppressed responses to canPlayType().
+const char kOverrideEncryptedMediaCanPlayType[] =
+ "override-encrypted-media-canplaytype";
+
+#if defined(GOOGLE_TV)
+// Use an external video surface for videos whose pixel count is greater than
+// or equal to the specified value. For example, a value of 0 enables the
+// external video surface for all videos, and a value of 921600 (=1280*720)
+// enables it for 720p videos and larger.
+const char kUseExternalVideoSurfaceThresholdInPixels[] =
+ "use-external-video-surface-threshold-in-pixels";
+#endif
+
+#if defined(OS_LINUX) || defined(OS_FREEBSD) || defined(OS_SOLARIS)
+// The Alsa device to use when opening an audio input stream.
+const char kAlsaInputDevice[] = "alsa-input-device";
+// The Alsa device to use when opening an audio stream.
+const char kAlsaOutputDevice[] = "alsa-output-device";
+#endif
+
+#if defined(OS_MACOSX)
+// Unlike other platforms, OSX requires CoreAudio calls to happen on the main
+// thread of the process. Provide a way to disable this until support is well
+// tested. See http://crbug.com/158170.
+// TODO(dalecurtis): Remove this once we're sure nothing has exploded.
+const char kDisableMainThreadAudio[] = "disable-main-thread-audio";
+#endif
+
+#if defined(OS_WIN)
+// Use exclusive mode audio streaming for Windows Vista and higher.
+// Leads to lower latencies for audio streams that use the
+// AudioParameters::AUDIO_PCM_LOW_LATENCY audio path.
+// See http://msdn.microsoft.com/en-us/library/windows/desktop/dd370844.aspx
+// for details.
+const char kEnableExclusiveAudio[] = "enable-exclusive-audio";
+
+// Used to troubleshoot problems with different video capture implementations
+// on Windows. By default we use the Media Foundation API on Windows 7 and up,
+// but specifying this switch forces DirectShow to always be used.
+// See bug: http://crbug.com/268412
+const char kForceDirectShowVideoCapture[] = "force-directshow";
+
+// Use Windows WaveOut/In audio API even if Core Audio is supported.
+const char kForceWaveAudio[] = "force-wave-audio";
+
+// Instead of always using the hardware channel layout, check if a driver
+// supports the source channel layout. Avoids outputting empty channels and
+// permits drivers to enable stereo to multichannel expansion. Kept behind a
+// flag since some drivers lie about supported layouts and hang when used. See
+// http://crbug.com/259165 for more details.
+const char kTrySupportedChannelLayouts[] = "try-supported-channel-layouts";
+
+// Number of buffers to use for WaveOut.
+const char kWaveOutBuffers[] = "waveout-buffers";
+#endif
+
+#if defined(USE_CRAS)
+// Use CRAS, the ChromeOS audio server.
+const char kUseCras[] = "use-cras";
+#endif
+
+} // namespace switches
diff --git a/chromium/media/base/media_switches.h b/chromium/media/base/media_switches.h
new file mode 100644
index 00000000000..e6c1de02fe4
--- /dev/null
+++ b/chromium/media/base/media_switches.h
@@ -0,0 +1,54 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Defines all the "media" command-line switches.
+
+#ifndef MEDIA_BASE_MEDIA_SWITCHES_H_
+#define MEDIA_BASE_MEDIA_SWITCHES_H_
+
+#include "build/build_config.h"
+#include "media/base/media_export.h"
+
+namespace switches {
+
+MEDIA_EXPORT extern const char kAudioBufferSize[];
+
+MEDIA_EXPORT extern const char kEnableEac3Playback[];
+
+MEDIA_EXPORT extern const char kEnableOpusPlayback[];
+
+MEDIA_EXPORT extern const char kEnableVp8AlphaPlayback[];
+
+MEDIA_EXPORT extern const char kVideoThreads[];
+
+MEDIA_EXPORT extern const char kOverrideEncryptedMediaCanPlayType[];
+
+#if defined(GOOGLE_TV)
+MEDIA_EXPORT extern const char kUseExternalVideoSurfaceThresholdInPixels[];
+#endif
+
+#if defined(OS_LINUX) || defined(OS_FREEBSD) || defined(OS_SOLARIS)
+MEDIA_EXPORT extern const char kAlsaInputDevice[];
+MEDIA_EXPORT extern const char kAlsaOutputDevice[];
+#endif
+
+#if defined(OS_MACOSX)
+MEDIA_EXPORT extern const char kDisableMainThreadAudio[];
+#endif
+
+#if defined(OS_WIN)
+MEDIA_EXPORT extern const char kEnableExclusiveAudio[];
+MEDIA_EXPORT extern const char kForceDirectShowVideoCapture[];
+MEDIA_EXPORT extern const char kForceWaveAudio[];
+MEDIA_EXPORT extern const char kTrySupportedChannelLayouts[];
+MEDIA_EXPORT extern const char kWaveOutBuffers[];
+#endif
+
+#if defined(USE_CRAS)
+MEDIA_EXPORT extern const char kUseCras[];
+#endif
+
+} // namespace switches
+
+#endif // MEDIA_BASE_MEDIA_SWITCHES_H_
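
A brief sketch of how such a switch is typically consumed; the helper below is
illustrative, not part of the patch:

    #include "base/command_line.h"
    #include "base/strings/string_number_conversions.h"
    #include "media/base/media_switches.h"

    int GetAudioBufferSize(int default_size) {
      const CommandLine* cmd = CommandLine::ForCurrentProcess();
      int buffer_size;
      if (cmd->HasSwitch(switches::kAudioBufferSize) &&
          base::StringToInt(
              cmd->GetSwitchValueASCII(switches::kAudioBufferSize),
              &buffer_size)) {
        return buffer_size;
      }
      return default_size;
    }
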
diff --git a/chromium/media/base/media_win.cc b/chromium/media/base/media_win.cc
new file mode 100644
index 00000000000..43bf6a7f458
--- /dev/null
+++ b/chromium/media/base/media_win.cc
@@ -0,0 +1,39 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/media.h"
+
+#include <windows.h>
+#if defined(_WIN32_WINNT_WIN8)
+// The Windows 8 SDK defines FACILITY_VISUALCPP in winerror.h.
+#undef FACILITY_VISUALCPP
+#endif
+#include <delayimp.h>
+
+#include "base/files/file_path.h"
+
+#pragma comment(lib, "delayimp.lib")
+
+namespace media {
+namespace internal {
+
+bool InitializeMediaLibraryInternal(const base::FilePath& module_dir) {
+ // LoadLibraryEx(..., LOAD_WITH_ALTERED_SEARCH_PATH) cannot handle
+ // relative path.
+ if (!module_dir.IsAbsolute())
+ return false;
+
+ // Use alternate DLL search path so we don't load dependencies from the
+ // system path. Refer to http://crbug.com/35857
+ static const char kFFmpegDLL[] = "ffmpegsumo.dll";
+ HMODULE lib = ::LoadLibraryEx(
+ module_dir.AppendASCII(kFFmpegDLL).value().c_str(), NULL,
+ LOAD_WITH_ALTERED_SEARCH_PATH);
+
+ // Check that we loaded the library successfully.
+ return lib != NULL;
+}
+
+} // namespace internal
+} // namespace media
diff --git a/chromium/media/base/mock_audio_renderer_sink.cc b/chromium/media/base/mock_audio_renderer_sink.cc
new file mode 100644
index 00000000000..b21eb19ba13
--- /dev/null
+++ b/chromium/media/base/mock_audio_renderer_sink.cc
@@ -0,0 +1,17 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/mock_audio_renderer_sink.h"
+
+namespace media {
+
+MockAudioRendererSink::MockAudioRendererSink() {}
+MockAudioRendererSink::~MockAudioRendererSink() {}
+
+void MockAudioRendererSink::Initialize(const AudioParameters& params,
+ RenderCallback* renderer) {
+ callback_ = renderer;
+}
+
+} // namespace media
diff --git a/chromium/media/base/mock_audio_renderer_sink.h b/chromium/media/base/mock_audio_renderer_sink.h
new file mode 100644
index 00000000000..00a4092893d
--- /dev/null
+++ b/chromium/media/base/mock_audio_renderer_sink.h
@@ -0,0 +1,39 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_MOCK_AUDIO_RENDERER_SINK_H_
+#define MEDIA_BASE_MOCK_AUDIO_RENDERER_SINK_H_
+
+#include "media/audio/audio_parameters.h"
+#include "media/base/audio_renderer_sink.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace media {
+
+class MockAudioRendererSink : public AudioRendererSink {
+ public:
+ MockAudioRendererSink();
+
+ MOCK_METHOD0(Start, void());
+ MOCK_METHOD0(Stop, void());
+ MOCK_METHOD0(Pause, void());
+ MOCK_METHOD0(Play, void());
+ MOCK_METHOD1(SetVolume, bool(double volume));
+
+ virtual void Initialize(const AudioParameters& params,
+ RenderCallback* renderer) OVERRIDE;
+ AudioRendererSink::RenderCallback* callback() { return callback_; }
+
+ protected:
+ virtual ~MockAudioRendererSink();
+
+ private:
+ RenderCallback* callback_;
+
+ DISALLOW_COPY_AND_ASSIGN(MockAudioRendererSink);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_MOCK_AUDIO_RENDERER_SINK_H_
diff --git a/chromium/media/base/mock_data_source_host.cc b/chromium/media/base/mock_data_source_host.cc
new file mode 100644
index 00000000000..eff0b78f163
--- /dev/null
+++ b/chromium/media/base/mock_data_source_host.cc
@@ -0,0 +1,13 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/mock_data_source_host.h"
+
+namespace media {
+
+MockDataSourceHost::MockDataSourceHost() {}
+
+MockDataSourceHost::~MockDataSourceHost() {}
+
+} // namespace media
diff --git a/chromium/media/base/mock_data_source_host.h b/chromium/media/base/mock_data_source_host.h
new file mode 100644
index 00000000000..914d0556136
--- /dev/null
+++ b/chromium/media/base/mock_data_source_host.h
@@ -0,0 +1,32 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+#ifndef MEDIA_BASE_MOCK_DATA_SOURCE_HOST_H_
+#define MEDIA_BASE_MOCK_DATA_SOURCE_HOST_H_
+
+#include <string>
+
+#include "media/base/data_source.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace media {
+
+class MockDataSourceHost : public DataSourceHost {
+ public:
+ MockDataSourceHost();
+ virtual ~MockDataSourceHost();
+
+ // DataSourceHost implementation.
+ MOCK_METHOD1(SetTotalBytes, void(int64 total_bytes));
+ MOCK_METHOD2(AddBufferedByteRange, void(int64 start, int64 end));
+ MOCK_METHOD2(AddBufferedTimeRange, void(base::TimeDelta start,
+ base::TimeDelta end));
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MockDataSourceHost);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_MOCK_DATA_SOURCE_HOST_H_
diff --git a/chromium/media/base/mock_demuxer_host.cc b/chromium/media/base/mock_demuxer_host.cc
new file mode 100644
index 00000000000..100787f23af
--- /dev/null
+++ b/chromium/media/base/mock_demuxer_host.cc
@@ -0,0 +1,13 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/mock_demuxer_host.h"
+
+namespace media {
+
+MockDemuxerHost::MockDemuxerHost() {}
+
+MockDemuxerHost::~MockDemuxerHost() {}
+
+} // namespace media
diff --git a/chromium/media/base/mock_demuxer_host.h b/chromium/media/base/mock_demuxer_host.h
new file mode 100644
index 00000000000..597c13298c5
--- /dev/null
+++ b/chromium/media/base/mock_demuxer_host.h
@@ -0,0 +1,36 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+#ifndef MEDIA_BASE_MOCK_DEMUXER_HOST_H_
+#define MEDIA_BASE_MOCK_DEMUXER_HOST_H_
+
+#include <string>
+
+#include "media/base/demuxer.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace media {
+
+class MockDemuxerHost : public DemuxerHost {
+ public:
+ MockDemuxerHost();
+ virtual ~MockDemuxerHost();
+
+ // DataSourceHost implementation.
+ MOCK_METHOD1(SetTotalBytes, void(int64 total_bytes));
+ MOCK_METHOD2(AddBufferedByteRange, void(int64 start, int64 end));
+ MOCK_METHOD2(AddBufferedTimeRange, void(base::TimeDelta start,
+ base::TimeDelta end));
+
+ // DemuxerHost implementation.
+ MOCK_METHOD1(OnDemuxerError, void(PipelineStatus error));
+ MOCK_METHOD1(SetDuration, void(base::TimeDelta duration));
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MockDemuxerHost);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_MOCK_DEMUXER_HOST_H_
diff --git a/chromium/media/base/mock_filters.cc b/chromium/media/base/mock_filters.cc
new file mode 100644
index 00000000000..eaf52013cda
--- /dev/null
+++ b/chromium/media/base/mock_filters.cc
@@ -0,0 +1,77 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/mock_filters.h"
+
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+
+using ::testing::_;
+using ::testing::Invoke;
+using ::testing::NotNull;
+using ::testing::Return;
+
+namespace media {
+
+MockDemuxer::MockDemuxer() {}
+
+MockDemuxer::~MockDemuxer() {}
+
+MockDemuxerStream::MockDemuxerStream(DemuxerStream::Type type) : type_(type) {}
+
+MockDemuxerStream::~MockDemuxerStream() {}
+
+DemuxerStream::Type MockDemuxerStream::type() {
+ return type_;
+}
+
+AudioDecoderConfig MockDemuxerStream::audio_decoder_config() {
+ DCHECK_EQ(type_, DemuxerStream::AUDIO);
+ return audio_decoder_config_;
+}
+
+VideoDecoderConfig MockDemuxerStream::video_decoder_config() {
+ DCHECK_EQ(type_, DemuxerStream::VIDEO);
+ return video_decoder_config_;
+}
+
+void MockDemuxerStream::set_audio_decoder_config(
+ const AudioDecoderConfig& config) {
+ DCHECK_EQ(type_, DemuxerStream::AUDIO);
+ audio_decoder_config_ = config;
+}
+
+void MockDemuxerStream::set_video_decoder_config(
+ const VideoDecoderConfig& config) {
+ DCHECK_EQ(type_, DemuxerStream::VIDEO);
+ video_decoder_config_ = config;
+}
+
+MockVideoDecoder::MockVideoDecoder() {
+ EXPECT_CALL(*this, HasAlpha()).WillRepeatedly(Return(false));
+}
+
+MockVideoDecoder::~MockVideoDecoder() {}
+
+MockAudioDecoder::MockAudioDecoder() {}
+
+MockAudioDecoder::~MockAudioDecoder() {}
+
+MockVideoRenderer::MockVideoRenderer() {}
+
+MockVideoRenderer::~MockVideoRenderer() {}
+
+MockAudioRenderer::MockAudioRenderer() {}
+
+MockAudioRenderer::~MockAudioRenderer() {}
+
+MockDecryptor::MockDecryptor() {}
+
+MockDecryptor::~MockDecryptor() {}
+
+MockStatisticsCB::MockStatisticsCB() {}
+
+MockStatisticsCB::~MockStatisticsCB() {}
+
+} // namespace media
diff --git a/chromium/media/base/mock_filters.h b/chromium/media/base/mock_filters.h
new file mode 100644
index 00000000000..fb5e8a0dfd8
--- /dev/null
+++ b/chromium/media/base/mock_filters.h
@@ -0,0 +1,199 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_MOCK_FILTERS_H_
+#define MEDIA_BASE_MOCK_FILTERS_H_
+
+#include <string>
+
+#include "base/callback.h"
+#include "media/base/audio_decoder.h"
+#include "media/base/audio_decoder_config.h"
+#include "media/base/audio_renderer.h"
+#include "media/base/decoder_buffer.h"
+#include "media/base/decryptor.h"
+#include "media/base/demuxer.h"
+#include "media/base/filter_collection.h"
+#include "media/base/pipeline_status.h"
+#include "media/base/video_decoder.h"
+#include "media/base/video_decoder_config.h"
+#include "media/base/video_frame.h"
+#include "media/base/video_renderer.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace media {
+
+class MockDemuxer : public Demuxer {
+ public:
+ MockDemuxer();
+ virtual ~MockDemuxer();
+
+ // Demuxer implementation.
+ MOCK_METHOD2(Initialize, void(DemuxerHost* host, const PipelineStatusCB& cb));
+ MOCK_METHOD1(SetPlaybackRate, void(float playback_rate));
+ MOCK_METHOD2(Seek, void(base::TimeDelta time, const PipelineStatusCB& cb));
+ MOCK_METHOD1(Stop, void(const base::Closure& callback));
+ MOCK_METHOD0(OnAudioRendererDisabled, void());
+ MOCK_METHOD1(GetStream, DemuxerStream*(DemuxerStream::Type));
+ MOCK_CONST_METHOD0(GetStartTime, base::TimeDelta());
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MockDemuxer);
+};
+
+class MockDemuxerStream : public DemuxerStream {
+ public:
+ explicit MockDemuxerStream(DemuxerStream::Type type);
+ virtual ~MockDemuxerStream();
+
+ // DemuxerStream implementation.
+ virtual Type type() OVERRIDE;
+ MOCK_METHOD1(Read, void(const ReadCB& read_cb));
+ virtual AudioDecoderConfig audio_decoder_config() OVERRIDE;
+ virtual VideoDecoderConfig video_decoder_config() OVERRIDE;
+ MOCK_METHOD0(EnableBitstreamConverter, void());
+
+ void set_audio_decoder_config(const AudioDecoderConfig& config);
+ void set_video_decoder_config(const VideoDecoderConfig& config);
+
+ private:
+ DemuxerStream::Type type_;
+ AudioDecoderConfig audio_decoder_config_;
+ VideoDecoderConfig video_decoder_config_;
+
+ DISALLOW_COPY_AND_ASSIGN(MockDemuxerStream);
+};
+
+class MockVideoDecoder : public VideoDecoder {
+ public:
+ MockVideoDecoder();
+ virtual ~MockVideoDecoder();
+
+ // VideoDecoder implementation.
+ MOCK_METHOD2(Initialize, void(const VideoDecoderConfig& config,
+ const PipelineStatusCB&));
+ MOCK_METHOD2(Decode, void(const scoped_refptr<DecoderBuffer>& buffer,
+ const DecodeCB&));
+ MOCK_METHOD1(Reset, void(const base::Closure&));
+ MOCK_METHOD1(Stop, void(const base::Closure&));
+ MOCK_CONST_METHOD0(HasAlpha, bool());
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MockVideoDecoder);
+};
+
+class MockAudioDecoder : public AudioDecoder {
+ public:
+ MockAudioDecoder();
+ virtual ~MockAudioDecoder();
+
+ // AudioDecoder implementation.
+ MOCK_METHOD3(Initialize, void(DemuxerStream*,
+ const PipelineStatusCB&,
+ const StatisticsCB&));
+ MOCK_METHOD1(Read, void(const ReadCB&));
+ MOCK_METHOD0(bits_per_channel, int(void));
+ MOCK_METHOD0(channel_layout, ChannelLayout(void));
+ MOCK_METHOD0(samples_per_second, int(void));
+ MOCK_METHOD1(Reset, void(const base::Closure&));
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MockAudioDecoder);
+};
+
+class MockVideoRenderer : public VideoRenderer {
+ public:
+ MockVideoRenderer();
+ virtual ~MockVideoRenderer();
+
+ // VideoRenderer implementation.
+ MOCK_METHOD9(Initialize, void(DemuxerStream* stream,
+ const PipelineStatusCB& init_cb,
+ const StatisticsCB& statistics_cb,
+ const TimeCB& time_cb,
+ const NaturalSizeChangedCB& size_changed_cb,
+ const base::Closure& ended_cb,
+ const PipelineStatusCB& error_cb,
+ const TimeDeltaCB& get_time_cb,
+ const TimeDeltaCB& get_duration_cb));
+ MOCK_METHOD1(Play, void(const base::Closure& callback));
+ MOCK_METHOD1(Pause, void(const base::Closure& callback));
+ MOCK_METHOD1(Flush, void(const base::Closure& callback));
+ MOCK_METHOD2(Preroll, void(base::TimeDelta time, const PipelineStatusCB& cb));
+ MOCK_METHOD1(Stop, void(const base::Closure& callback));
+ MOCK_METHOD1(SetPlaybackRate, void(float playback_rate));
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MockVideoRenderer);
+};
+
+class MockAudioRenderer : public AudioRenderer {
+ public:
+ MockAudioRenderer();
+ virtual ~MockAudioRenderer();
+
+ // AudioRenderer implementation.
+ MOCK_METHOD8(Initialize, void(DemuxerStream* stream,
+ const PipelineStatusCB& init_cb,
+ const StatisticsCB& statistics_cb,
+ const base::Closure& underflow_cb,
+ const TimeCB& time_cb,
+ const base::Closure& ended_cb,
+ const base::Closure& disabled_cb,
+ const PipelineStatusCB& error_cb));
+ MOCK_METHOD1(Play, void(const base::Closure& callback));
+ MOCK_METHOD1(Pause, void(const base::Closure& callback));
+ MOCK_METHOD1(Flush, void(const base::Closure& callback));
+ MOCK_METHOD1(Stop, void(const base::Closure& callback));
+ MOCK_METHOD1(SetPlaybackRate, void(float playback_rate));
+ MOCK_METHOD2(Preroll, void(base::TimeDelta time, const PipelineStatusCB& cb));
+ MOCK_METHOD1(SetVolume, void(float volume));
+ MOCK_METHOD0(ResumeAfterUnderflow, void());
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MockAudioRenderer);
+};
+
+class MockDecryptor : public Decryptor {
+ public:
+ MockDecryptor();
+ virtual ~MockDecryptor();
+
+ MOCK_METHOD2(RegisterNewKeyCB, void(StreamType stream_type,
+ const NewKeyCB& new_key_cb));
+ MOCK_METHOD3(Decrypt, void(StreamType stream_type,
+ const scoped_refptr<DecoderBuffer>& encrypted,
+ const DecryptCB& decrypt_cb));
+ MOCK_METHOD1(CancelDecrypt, void(StreamType stream_type));
+ MOCK_METHOD2(InitializeAudioDecoder,
+ void(const AudioDecoderConfig& config,
+ const DecoderInitCB& init_cb));
+ MOCK_METHOD2(InitializeVideoDecoder,
+ void(const VideoDecoderConfig& config,
+ const DecoderInitCB& init_cb));
+ MOCK_METHOD2(DecryptAndDecodeAudio,
+ void(const scoped_refptr<media::DecoderBuffer>& encrypted,
+ const AudioDecodeCB& audio_decode_cb));
+ MOCK_METHOD2(DecryptAndDecodeVideo,
+ void(const scoped_refptr<media::DecoderBuffer>& encrypted,
+ const VideoDecodeCB& video_decode_cb));
+ MOCK_METHOD1(ResetDecoder, void(StreamType stream_type));
+ MOCK_METHOD1(DeinitializeDecoder, void(StreamType stream_type));
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MockDecryptor);
+};
+
+// Helper mock statistics callback.
+class MockStatisticsCB {
+ public:
+ MockStatisticsCB();
+ ~MockStatisticsCB();
+
+ MOCK_METHOD1(OnStatistics, void(const media::PipelineStatistics& statistics));
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_MOCK_FILTERS_H_
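
A brief usage sketch for the mocks above, assuming a gtest/gmock test target:

    #include "media/base/mock_filters.h"
    #include "testing/gtest/include/gtest/gtest.h"

    using ::testing::_;

    TEST(MockFiltersSketch, AudioStream) {
      media::MockDemuxerStream stream(media::DemuxerStream::AUDIO);
      stream.set_audio_decoder_config(media::AudioDecoderConfig());
      EXPECT_CALL(stream, Read(_));  // Plain gmock expectations on Read().

      EXPECT_EQ(media::DemuxerStream::AUDIO, stream.type());
      stream.Read(media::DemuxerStream::ReadCB());
    }
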
diff --git a/chromium/media/base/multi_channel_resampler.cc b/chromium/media/base/multi_channel_resampler.cc
new file mode 100644
index 00000000000..801e5344cf2
--- /dev/null
+++ b/chromium/media/base/multi_channel_resampler.cc
@@ -0,0 +1,111 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/multi_channel_resampler.h"
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/logging.h"
+#include "media/base/audio_bus.h"
+
+namespace media {
+
+MultiChannelResampler::MultiChannelResampler(int channels,
+ double io_sample_rate_ratio,
+ size_t request_size,
+ const ReadCB& read_cb)
+ : read_cb_(read_cb),
+ wrapped_resampler_audio_bus_(AudioBus::CreateWrapper(channels)),
+ output_frames_ready_(0) {
+ // Allocate each channel's resampler.
+ resamplers_.reserve(channels);
+ for (int i = 0; i < channels; ++i) {
+ resamplers_.push_back(new SincResampler(
+ io_sample_rate_ratio, request_size, base::Bind(
+ &MultiChannelResampler::ProvideInput, base::Unretained(this), i)));
+ }
+
+ // Set up the wrapped AudioBus for channel data.
+ wrapped_resampler_audio_bus_->set_frames(request_size);
+
+ // Allocate storage for all channels except the first, which will use the
+ // |destination| provided to ProvideInput() directly.
+ if (channels > 1) {
+ resampler_audio_bus_ = AudioBus::Create(channels - 1, request_size);
+ for (int i = 0; i < resampler_audio_bus_->channels(); ++i) {
+ wrapped_resampler_audio_bus_->SetChannelData(
+ i + 1, resampler_audio_bus_->channel(i));
+ }
+ }
+}
+
+MultiChannelResampler::~MultiChannelResampler() {}
+
+void MultiChannelResampler::Resample(int frames, AudioBus* audio_bus) {
+ DCHECK_EQ(static_cast<size_t>(audio_bus->channels()), resamplers_.size());
+
+ // Optimize the single channel case to avoid the chunking process below.
+ if (audio_bus->channels() == 1) {
+ resamplers_[0]->Resample(frames, audio_bus->channel(0));
+ return;
+ }
+
+ // We need to ensure that SincResampler only calls ProvideInput once for each
+ // channel. To ensure this, we chunk the number of requested frames into
+ // SincResampler::ChunkSize() sized chunks. SincResampler guarantees it will
+ // only call ProvideInput() once when we resample this way.
+ output_frames_ready_ = 0;
+ while (output_frames_ready_ < frames) {
+ int chunk_size = resamplers_[0]->ChunkSize();
+ int frames_this_time = std::min(frames - output_frames_ready_, chunk_size);
+
+ // Resample each channel.
+ for (size_t i = 0; i < resamplers_.size(); ++i) {
+ DCHECK_EQ(chunk_size, resamplers_[i]->ChunkSize());
+
+ // Depending on the sample-rate scale factor, and the internal buffering
+ // used in a SincResampler kernel, this call to Resample() will only
+ // sometimes call ProvideInput(). However, if it calls ProvideInput() for
+ // the first channel, then it will call it for the remaining channels,
+ // since they all buffer in the same way and are processing the same
+ // number of frames.
+ resamplers_[i]->Resample(
+ frames_this_time, audio_bus->channel(i) + output_frames_ready_);
+ }
+
+ output_frames_ready_ += frames_this_time;
+ }
+}
+
+void MultiChannelResampler::ProvideInput(int channel,
+ int frames,
+ float* destination) {
+ // Get the data from the multi-channel provider when the first channel asks
+ // for it. For subsequent channels, we can just dish out the channel data
+ // from that (stored in |resampler_audio_bus_|).
+ if (channel == 0) {
+ wrapped_resampler_audio_bus_->SetChannelData(0, destination);
+ read_cb_.Run(output_frames_ready_, wrapped_resampler_audio_bus_.get());
+ } else {
+ // All channels must ask for the same amount. This should always be the
+ // case, but let's just make sure.
+ DCHECK_EQ(frames, wrapped_resampler_audio_bus_->frames());
+
+ // Copy the channel data from what we received from |read_cb_|.
+ memcpy(destination, wrapped_resampler_audio_bus_->channel(channel),
+ sizeof(*wrapped_resampler_audio_bus_->channel(channel)) * frames);
+ }
+}
+
+void MultiChannelResampler::Flush() {
+ for (size_t i = 0; i < resamplers_.size(); ++i)
+ resamplers_[i]->Flush();
+}
+
+void MultiChannelResampler::SetRatio(double io_sample_rate_ratio) {
+ for (size_t i = 0; i < resamplers_.size(); ++i)
+ resamplers_[i]->SetRatio(io_sample_rate_ratio);
+}
+
+} // namespace media
diff --git a/chromium/media/base/multi_channel_resampler.h b/chromium/media/base/multi_channel_resampler.h
new file mode 100644
index 00000000000..148cb9da78a
--- /dev/null
+++ b/chromium/media/base/multi_channel_resampler.h
@@ -0,0 +1,77 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_MULTI_CHANNEL_RESAMPLER_H_
+#define MEDIA_BASE_MULTI_CHANNEL_RESAMPLER_H_
+
+#include <vector>
+
+#include "base/callback.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/scoped_vector.h"
+#include "media/base/sinc_resampler.h"
+
+namespace media {
+class AudioBus;
+
+// MultiChannelResampler is a multi-channel wrapper for SincResampler, allowing
+// high quality sample rate conversion of multiple channels at once.
+class MEDIA_EXPORT MultiChannelResampler {
+ public:
+ // Callback type for providing more data into the resampler. Expects AudioBus
+ // to be completely filled with data upon return; zero padded if not enough
+ // frames are available to satisfy the request. |frame_delay| is the number
+ // of output frames already processed and can be used to estimate delay.
+ typedef base::Callback<void(int frame_delay, AudioBus* audio_bus)> ReadCB;
+
+ // Constructs a MultiChannelResampler with the specified |read_cb|, which is
+ // used to acquire audio data for resampling. |io_sample_rate_ratio| is the
+ // ratio of input / output sample rates. |request_frames| is the size in
+ // frames of the AudioBus to be filled by |read_cb|.
+ MultiChannelResampler(int channels,
+ double io_sample_rate_ratio,
+ size_t request_frames,
+ const ReadCB& read_cb);
+ virtual ~MultiChannelResampler();
+
+ // Resamples |frames| of data from |read_cb_| into AudioBus.
+ void Resample(int frames, AudioBus* audio_bus);
+
+ // Flush all buffered data and reset internal indices. Not thread safe, do
+ // not call while Resample() is in progress.
+ void Flush();
+
+ // Update ratio for all SincResamplers. SetRatio() will cause reconstruction
+ // of the kernels used for resampling. Not thread safe, do not call while
+ // Resample() is in progress.
+ void SetRatio(double io_sample_rate_ratio);
+
+ private:
+ // SincResampler::ReadCB implementation. ProvideInput() will be called for
+ // each channel (in channel order) as SincResampler needs more data.
+ void ProvideInput(int channel, int frames, float* destination);
+
+ // Source of data for resampling.
+ ReadCB read_cb_;
+
+ // Each channel has its own high quality resampler.
+ ScopedVector<SincResampler> resamplers_;
+
+ // Buffers for audio data going into SincResampler from ReadCB.
+ scoped_ptr<AudioBus> resampler_audio_bus_;
+
+ // To avoid a memcpy() on the first channel we create a wrapped AudioBus where
+ // the first channel points to the |destination| provided to ProvideInput().
+ scoped_ptr<AudioBus> wrapped_resampler_audio_bus_;
+
+ // The number of output frames that have successfully been processed during
+ // the current Resample() call.
+ int output_frames_ready_;
+
+ DISALLOW_COPY_AND_ASSIGN(MultiChannelResampler);
+};
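+//
+// Example usage (illustrative sketch, not part of the original file; the
+// FillBus callback and the frame counts are hypothetical):
+//
+//   // ReadCB implementation; must completely fill |audio_bus|, zero padding
+//   // when no more input data is available.
+//   void FillBus(int frame_delay, AudioBus* audio_bus) {
+//     audio_bus->Zero();
+//   }
+//
+//   // Resample stereo audio from 48 kHz input down to 44.1 kHz output.
+//   MultiChannelResampler resampler(
+//       2, 48000.0 / 44100.0, SincResampler::kDefaultRequestSize,
+//       base::Bind(&FillBus));
+//   scoped_ptr<AudioBus> output = AudioBus::Create(2, 512);
+//   resampler.Resample(output->frames(), output.get());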
+
+} // namespace media
+
+#endif // MEDIA_BASE_MULTI_CHANNEL_RESAMPLER_H_
diff --git a/chromium/media/base/multi_channel_resampler_unittest.cc b/chromium/media/base/multi_channel_resampler_unittest.cc
new file mode 100644
index 00000000000..efaf0c54bdd
--- /dev/null
+++ b/chromium/media/base/multi_channel_resampler_unittest.cc
@@ -0,0 +1,139 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <cmath>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "media/base/audio_bus.h"
+#include "media/base/multi_channel_resampler.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+// Just test a basic resampling case. The SincResampler unit test will take
+// care of accuracy testing; we just need to check that multichannel works as
+// expected within some tolerance.
+static const float kScaleFactor = 192000.0f / 44100.0f;
+
+// Simulate large and small sample requests used by the different audio paths.
+static const int kHighLatencySize = 8192;
+// Low latency buffers show a larger error than high latency ones, which makes
+// sense since each error represents a larger portion of the total request.
+static const int kLowLatencySize = 128;
+
+// Test fill value.
+static const float kFillValue = 0.1f;
+
+// Chosen arbitrarily based on what each resampler reported during testing.
+static const double kLowLatencyMaxRMSError = 0.0036;
+static const double kLowLatencyMaxError = 0.04;
+static const double kHighLatencyMaxRMSError = 0.0036;
+static const double kHighLatencyMaxError = 0.04;
+
+class MultiChannelResamplerTest
+ : public testing::TestWithParam<int> {
+ public:
+  MultiChannelResamplerTest()
+      : frames_(0),
+        fill_junk_values_(false),
+        last_frame_delay_(-1) {
+  }
+ virtual ~MultiChannelResamplerTest() {}
+
+ void InitializeAudioData(int channels, int frames) {
+ frames_ = frames;
+ audio_bus_ = AudioBus::Create(channels, frames);
+ }
+
+  // MultiChannelResampler::ReadCB implementation; fills the provided
+  // |audio_bus| with |kFillValue| (or junk values when |fill_junk_values_|
+  // is set).
+ virtual void ProvideInput(int frame_delay, AudioBus* audio_bus) {
+ EXPECT_GE(frame_delay, last_frame_delay_);
+ last_frame_delay_ = frame_delay;
+
+ float fill_value = fill_junk_values_ ? (1 / kFillValue) : kFillValue;
+ EXPECT_EQ(audio_bus->channels(), audio_bus_->channels());
+ for (int i = 0; i < audio_bus->channels(); ++i)
+ for (int j = 0; j < audio_bus->frames(); ++j)
+ audio_bus->channel(i)[j] = fill_value;
+ }
+
+ void MultiChannelTest(int channels, int frames, double expected_max_rms_error,
+ double expected_max_error) {
+ InitializeAudioData(channels, frames);
+ MultiChannelResampler resampler(
+ channels, kScaleFactor, SincResampler::kDefaultRequestSize, base::Bind(
+ &MultiChannelResamplerTest::ProvideInput, base::Unretained(this)));
+
+ // First prime the resampler with some junk data, so we can verify Flush().
+ fill_junk_values_ = true;
+ resampler.Resample(1, audio_bus_.get());
+ resampler.Flush();
+ fill_junk_values_ = false;
+
+ // The last frame delay should be strictly less than the total frame count.
+ EXPECT_LT(last_frame_delay_, audio_bus_->frames());
+ last_frame_delay_ = -1;
+
+ // If Flush() didn't work, the rest of the tests will fail.
+ resampler.Resample(frames, audio_bus_.get());
+ TestValues(expected_max_rms_error, expected_max_error);
+ }
+
+ void HighLatencyTest(int channels) {
+ MultiChannelTest(channels, kHighLatencySize, kHighLatencyMaxRMSError,
+ kHighLatencyMaxError);
+ }
+
+ void LowLatencyTest(int channels) {
+ MultiChannelTest(channels, kLowLatencySize, kLowLatencyMaxRMSError,
+ kLowLatencyMaxError);
+ }
+
+  void TestValues(double expected_max_rms_error, double expected_max_error) {
+ // Calculate Root-Mean-Square-Error for the resampling.
+ double max_error = 0.0;
+ double sum_of_squares = 0.0;
+ for (int i = 0; i < audio_bus_->channels(); ++i) {
+ for (int j = 0; j < frames_; ++j) {
+ // Ensure all values are accounted for.
+ ASSERT_NE(audio_bus_->channel(i)[j], 0);
+
+ double error = fabs(audio_bus_->channel(i)[j] - kFillValue);
+ max_error = std::max(max_error, error);
+ sum_of_squares += error * error;
+ }
+ }
+
+ double rms_error = sqrt(
+ sum_of_squares / (frames_ * audio_bus_->channels()));
+
+ EXPECT_LE(rms_error, expected_max_rms_error);
+ EXPECT_LE(max_error, expected_max_error);
+ }
+
+ protected:
+ int frames_;
+ bool fill_junk_values_;
+ scoped_ptr<AudioBus> audio_bus_;
+ int last_frame_delay_;
+
+ DISALLOW_COPY_AND_ASSIGN(MultiChannelResamplerTest);
+};
+
+TEST_P(MultiChannelResamplerTest, HighLatency) {
+ HighLatencyTest(GetParam());
+}
+
+TEST_P(MultiChannelResamplerTest, LowLatency) {
+ LowLatencyTest(GetParam());
+}
+
+// Test common channel layouts: mono, stereo, 5.1, 7.1.
+INSTANTIATE_TEST_CASE_P(
+ MultiChannelResamplerTest, MultiChannelResamplerTest,
+ testing::Values(1, 2, 6, 8));
+
+} // namespace media
diff --git a/chromium/media/base/pipeline.cc b/chromium/media/base/pipeline.cc
new file mode 100644
index 00000000000..ccac81991c4
--- /dev/null
+++ b/chromium/media/base/pipeline.cc
@@ -0,0 +1,947 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/pipeline.h"
+
+#include <algorithm>
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/callback_helpers.h"
+#include "base/compiler_specific.h"
+#include "base/message_loop/message_loop.h"
+#include "base/metrics/histogram.h"
+#include "base/stl_util.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_util.h"
+#include "base/synchronization/condition_variable.h"
+#include "media/base/audio_decoder.h"
+#include "media/base/audio_renderer.h"
+#include "media/base/clock.h"
+#include "media/base/filter_collection.h"
+#include "media/base/media_log.h"
+#include "media/base/video_decoder.h"
+#include "media/base/video_decoder_config.h"
+#include "media/base/video_renderer.h"
+
+using base::TimeDelta;
+
+namespace media {
+
+Pipeline::Pipeline(const scoped_refptr<base::MessageLoopProxy>& message_loop,
+ MediaLog* media_log)
+ : message_loop_(message_loop),
+ media_log_(media_log),
+ running_(false),
+ did_loading_progress_(false),
+ total_bytes_(0),
+ natural_size_(0, 0),
+ volume_(1.0f),
+ playback_rate_(0.0f),
+ clock_(new Clock(&default_tick_clock_)),
+ waiting_for_clock_update_(false),
+ status_(PIPELINE_OK),
+ has_audio_(false),
+ has_video_(false),
+ state_(kCreated),
+ audio_ended_(false),
+ video_ended_(false),
+ audio_disabled_(false),
+ demuxer_(NULL),
+ creation_time_(default_tick_clock_.NowTicks()) {
+ media_log_->AddEvent(media_log_->CreatePipelineStateChangedEvent(kCreated));
+ media_log_->AddEvent(
+ media_log_->CreateEvent(MediaLogEvent::PIPELINE_CREATED));
+}
+
+Pipeline::~Pipeline() {
+ DCHECK(thread_checker_.CalledOnValidThread())
+ << "Pipeline must be destroyed on same thread that created it";
+ DCHECK(!running_) << "Stop() must complete before destroying object";
+ DCHECK(stop_cb_.is_null());
+ DCHECK(seek_cb_.is_null());
+
+ media_log_->AddEvent(
+ media_log_->CreateEvent(MediaLogEvent::PIPELINE_DESTROYED));
+}
+
+void Pipeline::Start(scoped_ptr<FilterCollection> collection,
+ const base::Closure& ended_cb,
+ const PipelineStatusCB& error_cb,
+ const PipelineStatusCB& seek_cb,
+ const BufferingStateCB& buffering_state_cb,
+ const base::Closure& duration_change_cb) {
+ base::AutoLock auto_lock(lock_);
+ CHECK(!running_) << "Media pipeline is already running";
+ DCHECK(!buffering_state_cb.is_null());
+
+ running_ = true;
+ message_loop_->PostTask(FROM_HERE, base::Bind(
+ &Pipeline::StartTask, base::Unretained(this), base::Passed(&collection),
+ ended_cb, error_cb, seek_cb, buffering_state_cb, duration_change_cb));
+}
+
+void Pipeline::Stop(const base::Closure& stop_cb) {
+ base::AutoLock auto_lock(lock_);
+ message_loop_->PostTask(FROM_HERE, base::Bind(
+ &Pipeline::StopTask, base::Unretained(this), stop_cb));
+}
+
+void Pipeline::Seek(TimeDelta time, const PipelineStatusCB& seek_cb) {
+ base::AutoLock auto_lock(lock_);
+ if (!running_) {
+ NOTREACHED() << "Media pipeline isn't running";
+ return;
+ }
+
+ message_loop_->PostTask(FROM_HERE, base::Bind(
+ &Pipeline::SeekTask, base::Unretained(this), time, seek_cb));
+}
+
+bool Pipeline::IsRunning() const {
+ base::AutoLock auto_lock(lock_);
+ return running_;
+}
+
+bool Pipeline::HasAudio() const {
+ base::AutoLock auto_lock(lock_);
+ return has_audio_;
+}
+
+bool Pipeline::HasVideo() const {
+ base::AutoLock auto_lock(lock_);
+ return has_video_;
+}
+
+float Pipeline::GetPlaybackRate() const {
+ base::AutoLock auto_lock(lock_);
+ return playback_rate_;
+}
+
+void Pipeline::SetPlaybackRate(float playback_rate) {
+ if (playback_rate < 0.0f)
+ return;
+
+ base::AutoLock auto_lock(lock_);
+ playback_rate_ = playback_rate;
+ if (running_) {
+ message_loop_->PostTask(FROM_HERE, base::Bind(
+ &Pipeline::PlaybackRateChangedTask, base::Unretained(this),
+ playback_rate));
+ }
+}
+
+float Pipeline::GetVolume() const {
+ base::AutoLock auto_lock(lock_);
+ return volume_;
+}
+
+void Pipeline::SetVolume(float volume) {
+ if (volume < 0.0f || volume > 1.0f)
+ return;
+
+ base::AutoLock auto_lock(lock_);
+ volume_ = volume;
+ if (running_) {
+ message_loop_->PostTask(FROM_HERE, base::Bind(
+ &Pipeline::VolumeChangedTask, base::Unretained(this), volume));
+ }
+}
+
+TimeDelta Pipeline::GetMediaTime() const {
+ base::AutoLock auto_lock(lock_);
+ return clock_->Elapsed();
+}
+
+Ranges<TimeDelta> Pipeline::GetBufferedTimeRanges() {
+ base::AutoLock auto_lock(lock_);
+ Ranges<TimeDelta> time_ranges;
+ for (size_t i = 0; i < buffered_time_ranges_.size(); ++i) {
+ time_ranges.Add(buffered_time_ranges_.start(i),
+ buffered_time_ranges_.end(i));
+ }
+ if (clock_->Duration() == TimeDelta() || total_bytes_ == 0)
+ return time_ranges;
+ for (size_t i = 0; i < buffered_byte_ranges_.size(); ++i) {
+ TimeDelta start = TimeForByteOffset_Locked(buffered_byte_ranges_.start(i));
+ TimeDelta end = TimeForByteOffset_Locked(buffered_byte_ranges_.end(i));
+ // Cap approximated buffered time at the length of the video.
+ end = std::min(end, clock_->Duration());
+ time_ranges.Add(start, end);
+ }
+
+ return time_ranges;
+}
+
+TimeDelta Pipeline::GetMediaDuration() const {
+ base::AutoLock auto_lock(lock_);
+ return clock_->Duration();
+}
+
+int64 Pipeline::GetTotalBytes() const {
+ base::AutoLock auto_lock(lock_);
+ return total_bytes_;
+}
+
+void Pipeline::GetNaturalVideoSize(gfx::Size* out_size) const {
+ CHECK(out_size);
+ base::AutoLock auto_lock(lock_);
+ *out_size = natural_size_;
+}
+
+bool Pipeline::DidLoadingProgress() const {
+ base::AutoLock auto_lock(lock_);
+ bool ret = did_loading_progress_;
+ did_loading_progress_ = false;
+ return ret;
+}
+
+PipelineStatistics Pipeline::GetStatistics() const {
+ base::AutoLock auto_lock(lock_);
+ return statistics_;
+}
+
+void Pipeline::SetClockForTesting(Clock* clock) {
+ clock_.reset(clock);
+}
+
+void Pipeline::SetErrorForTesting(PipelineStatus status) {
+ SetError(status);
+}
+
+void Pipeline::SetState(State next_state) {
+ if (state_ != kStarted && next_state == kStarted &&
+ !creation_time_.is_null()) {
+ UMA_HISTOGRAM_TIMES("Media.TimeToPipelineStarted",
+ default_tick_clock_.NowTicks() - creation_time_);
+ creation_time_ = base::TimeTicks();
+ }
+
+ DVLOG(2) << GetStateString(state_) << " -> " << GetStateString(next_state);
+
+ state_ = next_state;
+ media_log_->AddEvent(media_log_->CreatePipelineStateChangedEvent(next_state));
+}
+
+#define RETURN_STRING(state) case state: return #state;
+
+const char* Pipeline::GetStateString(State state) {
+ switch (state) {
+ RETURN_STRING(kCreated);
+ RETURN_STRING(kInitDemuxer);
+ RETURN_STRING(kInitAudioRenderer);
+ RETURN_STRING(kInitVideoRenderer);
+ RETURN_STRING(kInitPrerolling);
+ RETURN_STRING(kSeeking);
+ RETURN_STRING(kStarting);
+ RETURN_STRING(kStarted);
+ RETURN_STRING(kStopping);
+ RETURN_STRING(kStopped);
+ }
+ NOTREACHED();
+ return "INVALID";
+}
+
+#undef RETURN_STRING
+
+Pipeline::State Pipeline::GetNextState() const {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(stop_cb_.is_null())
+ << "State transitions don't happen when stopping";
+ DCHECK_EQ(status_, PIPELINE_OK)
+ << "State transitions don't happen when there's an error: " << status_;
+
+ switch (state_) {
+ case kCreated:
+ return kInitDemuxer;
+
+ case kInitDemuxer:
+ if (demuxer_->GetStream(DemuxerStream::AUDIO))
+ return kInitAudioRenderer;
+ if (demuxer_->GetStream(DemuxerStream::VIDEO))
+ return kInitVideoRenderer;
+ return kInitPrerolling;
+
+ case kInitAudioRenderer:
+ if (demuxer_->GetStream(DemuxerStream::VIDEO))
+ return kInitVideoRenderer;
+ return kInitPrerolling;
+
+ case kInitVideoRenderer:
+ return kInitPrerolling;
+
+ case kInitPrerolling:
+ return kStarting;
+
+ case kSeeking:
+ return kStarting;
+
+ case kStarting:
+ return kStarted;
+
+ case kStarted:
+ case kStopping:
+ case kStopped:
+ break;
+ }
+ NOTREACHED() << "State has no transition: " << state_;
+ return state_;
+}
+
+void Pipeline::OnDemuxerError(PipelineStatus error) {
+ SetError(error);
+}
+
+void Pipeline::SetError(PipelineStatus error) {
+ DCHECK(IsRunning());
+ DCHECK_NE(PIPELINE_OK, error);
+ VLOG(1) << "Media pipeline error: " << error;
+
+ message_loop_->PostTask(FROM_HERE, base::Bind(
+ &Pipeline::ErrorChangedTask, base::Unretained(this), error));
+
+ media_log_->AddEvent(media_log_->CreatePipelineErrorEvent(error));
+}
+
+void Pipeline::OnAudioDisabled() {
+ DCHECK(IsRunning());
+ message_loop_->PostTask(FROM_HERE, base::Bind(
+ &Pipeline::AudioDisabledTask, base::Unretained(this)));
+ media_log_->AddEvent(
+ media_log_->CreateEvent(MediaLogEvent::AUDIO_RENDERER_DISABLED));
+}
+
+void Pipeline::OnAudioTimeUpdate(TimeDelta time, TimeDelta max_time) {
+ DCHECK_LE(time.InMicroseconds(), max_time.InMicroseconds());
+ DCHECK(IsRunning());
+ base::AutoLock auto_lock(lock_);
+
+ if (!has_audio_)
+ return;
+ if (waiting_for_clock_update_ && time < clock_->Elapsed())
+ return;
+
+ // TODO(scherkus): |state_| should only be accessed on pipeline thread, see
+ // http://crbug.com/137973
+ if (state_ == kSeeking)
+ return;
+
+ clock_->SetTime(time, max_time);
+ StartClockIfWaitingForTimeUpdate_Locked();
+}
+
+void Pipeline::OnVideoTimeUpdate(TimeDelta max_time) {
+ DCHECK(IsRunning());
+ base::AutoLock auto_lock(lock_);
+
+ if (has_audio_)
+ return;
+
+ // TODO(scherkus): |state_| should only be accessed on pipeline thread, see
+ // http://crbug.com/137973
+ if (state_ == kSeeking)
+ return;
+
+ DCHECK(!waiting_for_clock_update_);
+ clock_->SetMaxTime(max_time);
+}
+
+void Pipeline::SetDuration(TimeDelta duration) {
+ DCHECK(IsRunning());
+ media_log_->AddEvent(
+ media_log_->CreateTimeEvent(
+ MediaLogEvent::DURATION_SET, "duration", duration));
+ UMA_HISTOGRAM_LONG_TIMES("Media.Duration", duration);
+
+ base::AutoLock auto_lock(lock_);
+ clock_->SetDuration(duration);
+ if (!duration_change_cb_.is_null())
+ duration_change_cb_.Run();
+}
+
+void Pipeline::SetTotalBytes(int64 total_bytes) {
+ DCHECK(IsRunning());
+ media_log_->AddEvent(
+ media_log_->CreateStringEvent(
+ MediaLogEvent::TOTAL_BYTES_SET, "total_bytes",
+ base::Int64ToString(total_bytes)));
+ int64 total_mbytes = total_bytes >> 20;
+ if (total_mbytes > kint32max)
+ total_mbytes = kint32max;
+ UMA_HISTOGRAM_CUSTOM_COUNTS(
+ "Media.TotalMBytes", static_cast<int32>(total_mbytes), 1, kint32max, 50);
+
+ base::AutoLock auto_lock(lock_);
+ total_bytes_ = total_bytes;
+}
+
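+// Illustrative worked example (not part of the original file): with a 100 s
+// duration and |total_bytes_| == 1,000,000, a byte offset of 500,000 maps to
+// 50 s, while offsets landing within the first or last second (the 1 s
+// epsilon band below) snap to zero or the full duration, respectively.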
+TimeDelta Pipeline::TimeForByteOffset_Locked(int64 byte_offset) const {
+ lock_.AssertAcquired();
+ TimeDelta time_offset = byte_offset * clock_->Duration() / total_bytes_;
+ // Since the byte->time calculation is approximate, fudge the beginning &
+ // ending areas to look better.
+ TimeDelta epsilon = clock_->Duration() / 100;
+ if (time_offset < epsilon)
+ return TimeDelta();
+ if (time_offset + epsilon > clock_->Duration())
+ return clock_->Duration();
+ return time_offset;
+}
+
+void Pipeline::OnStateTransition(PipelineStatus status) {
+ // Force post to process state transitions after current execution frame.
+ message_loop_->PostTask(FROM_HERE, base::Bind(
+ &Pipeline::StateTransitionTask, base::Unretained(this), status));
+}
+
+void Pipeline::StateTransitionTask(PipelineStatus status) {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+
+ // No-op any state transitions if we're stopping.
+ if (state_ == kStopping || state_ == kStopped)
+ return;
+
+ // Preserve existing abnormal status, otherwise update based on the result of
+ // the previous operation.
+ status_ = (status_ != PIPELINE_OK ? status_ : status);
+
+ if (status_ != PIPELINE_OK) {
+ ErrorChangedTask(status_);
+ return;
+ }
+
+ // Guard against accidentally clearing |pending_callbacks_| for states that
+ // use it as well as states that should not be using it.
+ //
+ // TODO(scherkus): Make every state transition use |pending_callbacks_|.
+ DCHECK_EQ(pending_callbacks_.get() != NULL,
+ (state_ == kInitPrerolling || state_ == kStarting ||
+ state_ == kSeeking));
+ pending_callbacks_.reset();
+
+ PipelineStatusCB done_cb = base::Bind(
+ &Pipeline::OnStateTransition, base::Unretained(this));
+
+ // Switch states, performing any entrance actions for the new state as well.
+ SetState(GetNextState());
+ switch (state_) {
+ case kInitDemuxer:
+ return InitializeDemuxer(done_cb);
+
+ case kInitAudioRenderer:
+ return InitializeAudioRenderer(done_cb);
+
+ case kInitVideoRenderer:
+ return InitializeVideoRenderer(done_cb);
+
+ case kInitPrerolling:
+ filter_collection_.reset();
+ {
+ base::AutoLock l(lock_);
+ // We do not want to start the clock running. We only want to set the
+ // base media time so our timestamp calculations will be correct.
+ clock_->SetTime(demuxer_->GetStartTime(), demuxer_->GetStartTime());
+
+ // TODO(scherkus): |has_audio_| should be true no matter what --
+ // otherwise people with muted/disabled sound cards will make our
+ // default controls look as if every video doesn't contain an audio
+ // track.
+ has_audio_ = audio_renderer_ != NULL && !audio_disabled_;
+ has_video_ = video_renderer_ != NULL;
+ }
+ if (!audio_renderer_ && !video_renderer_) {
+ done_cb.Run(PIPELINE_ERROR_COULD_NOT_RENDER);
+ return;
+ }
+
+ buffering_state_cb_.Run(kHaveMetadata);
+
+ return DoInitialPreroll(done_cb);
+
+ case kStarting:
+ return DoPlay(done_cb);
+
+ case kStarted:
+ {
+ base::AutoLock l(lock_);
+        // We use the audio stream to update the clock, so if there is such a
+        // stream, we pause the clock until we receive a valid timestamp.
+ waiting_for_clock_update_ = true;
+ if (!has_audio_) {
+ clock_->SetMaxTime(clock_->Duration());
+ StartClockIfWaitingForTimeUpdate_Locked();
+ }
+ }
+
+ DCHECK(!seek_cb_.is_null());
+ DCHECK_EQ(status_, PIPELINE_OK);
+
+ // Fire canplaythrough immediately after playback begins because of
+ // crbug.com/106480.
+ // TODO(vrk): set ready state to HaveFutureData when bug above is fixed.
+ buffering_state_cb_.Run(kPrerollCompleted);
+ return base::ResetAndReturn(&seek_cb_).Run(PIPELINE_OK);
+
+ case kStopping:
+ case kStopped:
+ case kCreated:
+ case kSeeking:
+ NOTREACHED() << "State has no transition: " << state_;
+ return;
+ }
+}
+
+// Note that the usage of base::Unretained() with the audio/video renderers in
+// the following DoXXX() functions is considered safe: the bound callbacks are
+// owned by |pending_callbacks_|, which shares the renderers' lifetime.
+//
+// That being said, deleting the renderers while keeping |pending_callbacks_|
+// running on the media thread would result in crashes.
+void Pipeline::DoInitialPreroll(const PipelineStatusCB& done_cb) {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(!pending_callbacks_.get());
+ SerialRunner::Queue bound_fns;
+
+ base::TimeDelta seek_timestamp = demuxer_->GetStartTime();
+
+ // Preroll renderers.
+ if (audio_renderer_) {
+ bound_fns.Push(base::Bind(
+ &AudioRenderer::Preroll, base::Unretained(audio_renderer_.get()),
+ seek_timestamp));
+ }
+
+ if (video_renderer_) {
+ bound_fns.Push(base::Bind(
+ &VideoRenderer::Preroll, base::Unretained(video_renderer_.get()),
+ seek_timestamp));
+ }
+
+ pending_callbacks_ = SerialRunner::Run(bound_fns, done_cb);
+}
+
+void Pipeline::DoSeek(
+ base::TimeDelta seek_timestamp,
+ const PipelineStatusCB& done_cb) {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(!pending_callbacks_.get());
+ SerialRunner::Queue bound_fns;
+
+ // Pause.
+ if (audio_renderer_) {
+ bound_fns.Push(base::Bind(
+ &AudioRenderer::Pause, base::Unretained(audio_renderer_.get())));
+ }
+ if (video_renderer_) {
+ bound_fns.Push(base::Bind(
+ &VideoRenderer::Pause, base::Unretained(video_renderer_.get())));
+ }
+
+ // Flush.
+ if (audio_renderer_) {
+ bound_fns.Push(base::Bind(
+ &AudioRenderer::Flush, base::Unretained(audio_renderer_.get())));
+ }
+ if (video_renderer_) {
+ bound_fns.Push(base::Bind(
+ &VideoRenderer::Flush, base::Unretained(video_renderer_.get())));
+ }
+
+ // Seek demuxer.
+ bound_fns.Push(base::Bind(
+ &Demuxer::Seek, base::Unretained(demuxer_), seek_timestamp));
+
+ // Preroll renderers.
+ if (audio_renderer_) {
+ bound_fns.Push(base::Bind(
+ &AudioRenderer::Preroll, base::Unretained(audio_renderer_.get()),
+ seek_timestamp));
+ }
+
+ if (video_renderer_) {
+ bound_fns.Push(base::Bind(
+ &VideoRenderer::Preroll, base::Unretained(video_renderer_.get()),
+ seek_timestamp));
+ }
+
+ pending_callbacks_ = SerialRunner::Run(bound_fns, done_cb);
+}
+
+void Pipeline::DoPlay(const PipelineStatusCB& done_cb) {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(!pending_callbacks_.get());
+ SerialRunner::Queue bound_fns;
+
+ PlaybackRateChangedTask(GetPlaybackRate());
+ VolumeChangedTask(GetVolume());
+
+ if (audio_renderer_) {
+ bound_fns.Push(base::Bind(
+ &AudioRenderer::Play, base::Unretained(audio_renderer_.get())));
+ }
+
+ if (video_renderer_) {
+ bound_fns.Push(base::Bind(
+ &VideoRenderer::Play, base::Unretained(video_renderer_.get())));
+ }
+
+ pending_callbacks_ = SerialRunner::Run(bound_fns, done_cb);
+}
+
+void Pipeline::DoStop(const PipelineStatusCB& done_cb) {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(!pending_callbacks_.get());
+ SerialRunner::Queue bound_fns;
+
+ if (demuxer_) {
+ bound_fns.Push(base::Bind(
+ &Demuxer::Stop, base::Unretained(demuxer_)));
+ }
+
+ if (audio_renderer_) {
+ bound_fns.Push(base::Bind(
+ &AudioRenderer::Stop, base::Unretained(audio_renderer_.get())));
+ }
+
+ if (video_renderer_) {
+ bound_fns.Push(base::Bind(
+ &VideoRenderer::Stop, base::Unretained(video_renderer_.get())));
+ }
+
+ pending_callbacks_ = SerialRunner::Run(bound_fns, done_cb);
+}
+
+void Pipeline::OnStopCompleted(PipelineStatus status) {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK_EQ(state_, kStopping);
+ {
+ base::AutoLock l(lock_);
+ running_ = false;
+ }
+
+ SetState(kStopped);
+ pending_callbacks_.reset();
+ filter_collection_.reset();
+ audio_renderer_.reset();
+ video_renderer_.reset();
+ demuxer_ = NULL;
+
+ // If we stop during initialization/seeking we want to run |seek_cb_|
+ // followed by |stop_cb_| so we don't leave outstanding callbacks around.
+ if (!seek_cb_.is_null()) {
+ base::ResetAndReturn(&seek_cb_).Run(status_);
+ error_cb_.Reset();
+ }
+ if (!stop_cb_.is_null()) {
+ error_cb_.Reset();
+ base::ResetAndReturn(&stop_cb_).Run();
+
+ // NOTE: pipeline may be deleted at this point in time as a result of
+ // executing |stop_cb_|.
+ return;
+ }
+ if (!error_cb_.is_null()) {
+ DCHECK_NE(status_, PIPELINE_OK);
+ base::ResetAndReturn(&error_cb_).Run(status_);
+ }
+}
+
+void Pipeline::AddBufferedByteRange(int64 start, int64 end) {
+ DCHECK(IsRunning());
+ base::AutoLock auto_lock(lock_);
+ buffered_byte_ranges_.Add(start, end);
+ did_loading_progress_ = true;
+}
+
+void Pipeline::AddBufferedTimeRange(base::TimeDelta start,
+ base::TimeDelta end) {
+ DCHECK(IsRunning());
+ base::AutoLock auto_lock(lock_);
+ buffered_time_ranges_.Add(start, end);
+ did_loading_progress_ = true;
+}
+
+void Pipeline::OnNaturalVideoSizeChanged(const gfx::Size& size) {
+ DCHECK(IsRunning());
+ media_log_->AddEvent(media_log_->CreateVideoSizeSetEvent(
+ size.width(), size.height()));
+
+ base::AutoLock auto_lock(lock_);
+ natural_size_ = size;
+}
+
+void Pipeline::OnAudioRendererEnded() {
+ // Force post to process ended messages after current execution frame.
+ message_loop_->PostTask(FROM_HERE, base::Bind(
+ &Pipeline::DoAudioRendererEnded, base::Unretained(this)));
+ media_log_->AddEvent(media_log_->CreateEvent(MediaLogEvent::AUDIO_ENDED));
+}
+
+void Pipeline::OnVideoRendererEnded() {
+ // Force post to process ended messages after current execution frame.
+ message_loop_->PostTask(FROM_HERE, base::Bind(
+ &Pipeline::DoVideoRendererEnded, base::Unretained(this)));
+ media_log_->AddEvent(media_log_->CreateEvent(MediaLogEvent::VIDEO_ENDED));
+}
+
+// Called from any thread.
+void Pipeline::OnUpdateStatistics(const PipelineStatistics& stats) {
+ base::AutoLock auto_lock(lock_);
+ statistics_.audio_bytes_decoded += stats.audio_bytes_decoded;
+ statistics_.video_bytes_decoded += stats.video_bytes_decoded;
+ statistics_.video_frames_decoded += stats.video_frames_decoded;
+ statistics_.video_frames_dropped += stats.video_frames_dropped;
+}
+
+void Pipeline::StartTask(scoped_ptr<FilterCollection> filter_collection,
+ const base::Closure& ended_cb,
+ const PipelineStatusCB& error_cb,
+ const PipelineStatusCB& seek_cb,
+ const BufferingStateCB& buffering_state_cb,
+ const base::Closure& duration_change_cb) {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+ CHECK_EQ(kCreated, state_)
+ << "Media pipeline cannot be started more than once";
+
+ filter_collection_ = filter_collection.Pass();
+ ended_cb_ = ended_cb;
+ error_cb_ = error_cb;
+ seek_cb_ = seek_cb;
+ buffering_state_cb_ = buffering_state_cb;
+ duration_change_cb_ = duration_change_cb;
+
+ StateTransitionTask(PIPELINE_OK);
+}
+
+void Pipeline::StopTask(const base::Closure& stop_cb) {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(stop_cb_.is_null());
+
+ if (state_ == kStopped) {
+ stop_cb.Run();
+ return;
+ }
+
+ SetState(kStopping);
+ pending_callbacks_.reset();
+ stop_cb_ = stop_cb;
+
+ DoStop(base::Bind(&Pipeline::OnStopCompleted, base::Unretained(this)));
+}
+
+void Pipeline::ErrorChangedTask(PipelineStatus error) {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK_NE(PIPELINE_OK, error) << "PIPELINE_OK isn't an error!";
+
+ if (state_ == kStopping || state_ == kStopped)
+ return;
+
+ SetState(kStopping);
+ pending_callbacks_.reset();
+ status_ = error;
+
+ DoStop(base::Bind(&Pipeline::OnStopCompleted, base::Unretained(this)));
+}
+
+void Pipeline::PlaybackRateChangedTask(float playback_rate) {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+
+ // Playback rate changes are only carried out while playing.
+ if (state_ != kStarting && state_ != kStarted)
+ return;
+
+ {
+ base::AutoLock auto_lock(lock_);
+ clock_->SetPlaybackRate(playback_rate);
+ }
+
+ if (demuxer_)
+ demuxer_->SetPlaybackRate(playback_rate);
+  if (audio_renderer_)
+    audio_renderer_->SetPlaybackRate(playback_rate);
+  if (video_renderer_)
+    video_renderer_->SetPlaybackRate(playback_rate);
+}
+
+void Pipeline::VolumeChangedTask(float volume) {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+
+ // Volume changes are only carried out while playing.
+ if (state_ != kStarting && state_ != kStarted)
+ return;
+
+ if (audio_renderer_)
+ audio_renderer_->SetVolume(volume);
+}
+
+void Pipeline::SeekTask(TimeDelta time, const PipelineStatusCB& seek_cb) {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(stop_cb_.is_null());
+
+ // Suppress seeking if we're not fully started.
+ if (state_ != kStarted) {
+ DCHECK(state_ == kStopping || state_ == kStopped)
+ << "Receive extra seek in unexpected state: " << state_;
+
+ // TODO(scherkus): should we run the callback? I'm tempted to say the API
+ // will only execute the first Seek() request.
+ DVLOG(1) << "Media pipeline has not started, ignoring seek to "
+ << time.InMicroseconds() << " (current state: " << state_ << ")";
+ return;
+ }
+
+ DCHECK(seek_cb_.is_null());
+
+ SetState(kSeeking);
+ base::TimeDelta seek_timestamp = std::max(time, demuxer_->GetStartTime());
+ seek_cb_ = seek_cb;
+ audio_ended_ = false;
+ video_ended_ = false;
+
+ // Kick off seeking!
+ {
+ base::AutoLock auto_lock(lock_);
+ if (clock_->IsPlaying())
+ clock_->Pause();
+ clock_->SetTime(seek_timestamp, seek_timestamp);
+ }
+ DoSeek(seek_timestamp, base::Bind(
+ &Pipeline::OnStateTransition, base::Unretained(this)));
+}
+
+void Pipeline::DoAudioRendererEnded() {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+
+ if (state_ != kStarted)
+ return;
+
+ DCHECK(!audio_ended_);
+ audio_ended_ = true;
+
+ // Start clock since there is no more audio to trigger clock updates.
+ if (!audio_disabled_) {
+ base::AutoLock auto_lock(lock_);
+ clock_->SetMaxTime(clock_->Duration());
+ StartClockIfWaitingForTimeUpdate_Locked();
+ }
+
+ RunEndedCallbackIfNeeded();
+}
+
+void Pipeline::DoVideoRendererEnded() {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+
+ if (state_ != kStarted)
+ return;
+
+ DCHECK(!video_ended_);
+ video_ended_ = true;
+
+ RunEndedCallbackIfNeeded();
+}
+
+void Pipeline::RunEndedCallbackIfNeeded() {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+
+ if (audio_renderer_ && !audio_ended_ && !audio_disabled_)
+ return;
+
+ if (video_renderer_ && !video_ended_)
+ return;
+
+ {
+ base::AutoLock auto_lock(lock_);
+ clock_->EndOfStream();
+ }
+
+ DCHECK_EQ(status_, PIPELINE_OK);
+ ended_cb_.Run();
+}
+
+void Pipeline::AudioDisabledTask() {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+
+ base::AutoLock auto_lock(lock_);
+ has_audio_ = false;
+ audio_disabled_ = true;
+
+ // Notify our demuxer that we're no longer rendering audio.
+ demuxer_->OnAudioRendererDisabled();
+
+ // Start clock since there is no more audio to trigger clock updates.
+ clock_->SetMaxTime(clock_->Duration());
+ StartClockIfWaitingForTimeUpdate_Locked();
+}
+
+void Pipeline::InitializeDemuxer(const PipelineStatusCB& done_cb) {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+
+ demuxer_ = filter_collection_->GetDemuxer();
+ demuxer_->Initialize(this, done_cb);
+}
+
+void Pipeline::InitializeAudioRenderer(const PipelineStatusCB& done_cb) {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+
+ audio_renderer_ = filter_collection_->GetAudioRenderer();
+ audio_renderer_->Initialize(
+ demuxer_->GetStream(DemuxerStream::AUDIO),
+ done_cb,
+ base::Bind(&Pipeline::OnUpdateStatistics, base::Unretained(this)),
+ base::Bind(&Pipeline::OnAudioUnderflow, base::Unretained(this)),
+ base::Bind(&Pipeline::OnAudioTimeUpdate, base::Unretained(this)),
+ base::Bind(&Pipeline::OnAudioRendererEnded, base::Unretained(this)),
+ base::Bind(&Pipeline::OnAudioDisabled, base::Unretained(this)),
+ base::Bind(&Pipeline::SetError, base::Unretained(this)));
+}
+
+void Pipeline::InitializeVideoRenderer(const PipelineStatusCB& done_cb) {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+
+ DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::VIDEO);
+
+ {
+ // Get an initial natural size so we have something when we signal
+ // the kHaveMetadata buffering state.
+ base::AutoLock l(lock_);
+ natural_size_ = stream->video_decoder_config().natural_size();
+ }
+
+ video_renderer_ = filter_collection_->GetVideoRenderer();
+ video_renderer_->Initialize(
+ stream,
+ done_cb,
+ base::Bind(&Pipeline::OnUpdateStatistics, base::Unretained(this)),
+ base::Bind(&Pipeline::OnVideoTimeUpdate, base::Unretained(this)),
+ base::Bind(&Pipeline::OnNaturalVideoSizeChanged, base::Unretained(this)),
+ base::Bind(&Pipeline::OnVideoRendererEnded, base::Unretained(this)),
+ base::Bind(&Pipeline::SetError, base::Unretained(this)),
+ base::Bind(&Pipeline::GetMediaTime, base::Unretained(this)),
+ base::Bind(&Pipeline::GetMediaDuration, base::Unretained(this)));
+}
+
+void Pipeline::OnAudioUnderflow() {
+ if (!message_loop_->BelongsToCurrentThread()) {
+ message_loop_->PostTask(FROM_HERE, base::Bind(
+ &Pipeline::OnAudioUnderflow, base::Unretained(this)));
+ return;
+ }
+
+ if (state_ != kStarted)
+ return;
+
+ if (audio_renderer_)
+ audio_renderer_->ResumeAfterUnderflow();
+}
+
+void Pipeline::StartClockIfWaitingForTimeUpdate_Locked() {
+ lock_.AssertAcquired();
+ if (!waiting_for_clock_update_)
+ return;
+
+ waiting_for_clock_update_ = false;
+ clock_->Play();
+}
+
+} // namespace media
diff --git a/chromium/media/base/pipeline.h b/chromium/media/base/pipeline.h
new file mode 100644
index 00000000000..09ff9041639
--- /dev/null
+++ b/chromium/media/base/pipeline.h
@@ -0,0 +1,453 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_PIPELINE_H_
+#define MEDIA_BASE_PIPELINE_H_
+
+#include <string>
+
+#include "base/gtest_prod_util.h"
+#include "base/synchronization/condition_variable.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/thread_checker.h"
+#include "base/time/default_tick_clock.h"
+#include "media/base/audio_renderer.h"
+#include "media/base/demuxer.h"
+#include "media/base/media_export.h"
+#include "media/base/pipeline_status.h"
+#include "media/base/ranges.h"
+#include "media/base/serial_runner.h"
+#include "ui/gfx/size.h"
+
+namespace base {
+class MessageLoopProxy;
+class TimeDelta;
+}
+
+namespace media {
+
+class Clock;
+class FilterCollection;
+class MediaLog;
+class VideoRenderer;
+
+// Pipeline runs the media pipeline. Filters are created and called on the
+// message loop injected into this object. Pipeline works like a state
+// machine to perform asynchronous initialization, pausing, seeking and playing.
+//
+// Here's a state diagram that describes the lifetime of this object.
+//
+// [ *Created ] [ Any State ]
+// | Start() | Stop() / SetError()
+// V V
+// [ InitXXX (for each filter) ] [ Stopping ]
+// | |
+// V V
+// [ InitPreroll ] [ Stopped ]
+// |
+// V
+// [ Starting ] <-- [ Seeking ]
+// | ^
+// V |
+// [ Started ] ----------'
+// Seek()
+//
+// Initialization is a series of state transitions from "Created" through each
+// filter initialization state. When all filter initialization states have
+// completed, we are implicitly in a "Paused" state. At that point we simulate
+// a Seek() to the beginning of the media to give filters a chance to preroll.
+// From then on the normal Seek() transitions are carried out and we start
+// playing the media.
+//
+// If any error ever happens, this object will transition to the "Stopping"
+// state from any state and then to "Stopped" once teardown completes. If
+// Stop() is ever called, this object will transition to the "Stopped" state.
+class MEDIA_EXPORT Pipeline : public DemuxerHost {
+ public:
+ // Buffering states the pipeline transitions between during playback.
+ // kHaveMetadata:
+ // Indicates that the following things are known:
+ // content duration, natural size, start time, and whether the content has
+ // audio and/or video in supported formats.
+ // kPrerollCompleted:
+ // All renderers have buffered enough data to satisfy preroll and are ready
+ // to start playback.
+ enum BufferingState {
+ kHaveMetadata,
+ kPrerollCompleted,
+ };
+
+ typedef base::Callback<void(BufferingState)> BufferingStateCB;
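+  //
+  // Illustrative handler sketch (not part of the original file; the handler
+  // and notification names are hypothetical):
+  //
+  //   void OnBufferingState(Pipeline::BufferingState state) {
+  //     if (state == Pipeline::kPrerollCompleted)
+  //       NotifyCanPlayThrough();  // Hypothetical client notification.
+  //   }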
+
+ // Constructs a media pipeline that will execute on |message_loop|.
+ Pipeline(const scoped_refptr<base::MessageLoopProxy>& message_loop,
+ MediaLog* media_log);
+ virtual ~Pipeline();
+
+  // Builds a pipeline using the given filter collection to construct a filter
+  // chain, executing |seek_cb| when the initial seek/preroll has completed.
+ //
+ // |filter_collection| must be a complete collection containing a demuxer,
+ // audio/video decoders, and audio/video renderers. Failing to do so will
+ // result in a crash.
+ //
+ // The following permanent callbacks will be executed as follows up until
+ // Stop() has completed:
+ // |ended_cb| will be executed whenever the media reaches the end.
+ // |error_cb| will be executed whenever an error occurs but hasn't
+ // been reported already through another callback.
+ // |buffering_state_cb| Optional callback that will be executed whenever the
+ // pipeline's buffering state changes.
+ // |duration_change_cb| Optional callback that will be executed whenever the
+ // presentation duration changes.
+ // It is an error to call this method after the pipeline has already started.
+ void Start(scoped_ptr<FilterCollection> filter_collection,
+ const base::Closure& ended_cb,
+ const PipelineStatusCB& error_cb,
+ const PipelineStatusCB& seek_cb,
+ const BufferingStateCB& buffering_state_cb,
+ const base::Closure& duration_change_cb);
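+  //
+  // Illustrative call sketch (not part of the original file; the OnXXX
+  // handler names are hypothetical):
+  //
+  //   pipeline->Start(collection.Pass(),
+  //                   base::Bind(&OnEnded),
+  //                   base::Bind(&OnPipelineError),
+  //                   base::Bind(&OnSeekDone),
+  //                   base::Bind(&OnBufferingState),
+  //                   base::Bind(&OnDurationChanged));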
+
+ // Asynchronously stops the pipeline, executing |stop_cb| when the pipeline
+ // teardown has completed.
+ //
+  // Stop() must complete before destroying the pipeline. It is permissible to
+ // call Stop() at any point during the lifetime of the pipeline.
+ //
+ // It is safe to delete the pipeline during the execution of |stop_cb|.
+ void Stop(const base::Closure& stop_cb);
+
+  // Attempt to seek to the position specified by |time|. |seek_cb| will be
+  // executed when all filters in the pipeline have processed the seek.
+ //
+ // Clients are expected to call GetMediaTime() to check whether the seek
+ // succeeded.
+ //
+ // It is an error to call this method if the pipeline has not started.
+ void Seek(base::TimeDelta time, const PipelineStatusCB& seek_cb);
+
+ // Returns true if the pipeline has been started via Start(). If IsRunning()
+ // returns true, it is expected that Stop() will be called before destroying
+ // the pipeline.
+ bool IsRunning() const;
+
+ // Returns true if the media has audio.
+ bool HasAudio() const;
+
+ // Returns true if the media has video.
+ bool HasVideo() const;
+
+ // Gets the current playback rate of the pipeline. When the pipeline is
+ // started, the playback rate will be 0.0f. A rate of 1.0f indicates
+ // that the pipeline is rendering the media at the standard rate. Valid
+ // values for playback rate are >= 0.0f.
+ float GetPlaybackRate() const;
+
+ // Attempt to adjust the playback rate. Setting a playback rate of 0.0f pauses
+ // all rendering of the media. A rate of 1.0f indicates a normal playback
+ // rate. Values for the playback rate must be greater than or equal to 0.0f.
+ //
+ // TODO(scherkus): What about maximum rate? Does HTML5 specify a max?
+ void SetPlaybackRate(float playback_rate);
+
+ // Gets the current volume setting being used by the audio renderer. When
+ // the pipeline is started, this value will be 1.0f. Valid values range
+ // from 0.0f to 1.0f.
+ float GetVolume() const;
+
+ // Attempt to set the volume of the audio renderer. Valid values for volume
+ // range from 0.0f (muted) to 1.0f (full volume). This value affects all
+ // channels proportionately for multi-channel audio streams.
+ void SetVolume(float volume);
+
+ // Returns the current media playback time, which progresses from 0 until
+ // GetMediaDuration().
+ base::TimeDelta GetMediaTime() const;
+
+ // Get approximate time ranges of buffered media.
+ Ranges<base::TimeDelta> GetBufferedTimeRanges();
+
+  // Gets the duration of the media. If the duration has not been determined
+  // yet, returns a zero base::TimeDelta.
+ base::TimeDelta GetMediaDuration() const;
+
+ // Get the total size of the media file. If the size has not yet been
+ // determined or can not be determined, this value is 0.
+ int64 GetTotalBytes() const;
+
+ // Gets the natural size of the video output in pixel units. If there is no
+ // video or the video has not been rendered yet, the width and height will
+ // be 0.
+ void GetNaturalVideoSize(gfx::Size* out_size) const;
+
+ // Return true if loading progress has been made since the last time this
+ // method was called.
+ bool DidLoadingProgress() const;
+
+ // Gets the current pipeline statistics.
+ PipelineStatistics GetStatistics() const;
+
+ void SetClockForTesting(Clock* clock);
+ void SetErrorForTesting(PipelineStatus status);
+
+ private:
+ FRIEND_TEST_ALL_PREFIXES(PipelineTest, GetBufferedTimeRanges);
+ FRIEND_TEST_ALL_PREFIXES(PipelineTest, DisableAudioRenderer);
+ FRIEND_TEST_ALL_PREFIXES(PipelineTest, DisableAudioRendererDuringInit);
+ FRIEND_TEST_ALL_PREFIXES(PipelineTest, EndedCallback);
+ FRIEND_TEST_ALL_PREFIXES(PipelineTest, AudioStreamShorterThanVideo);
+ friend class MediaLog;
+
+ // Pipeline states, as described above.
+ enum State {
+ kCreated,
+ kInitDemuxer,
+ kInitAudioRenderer,
+ kInitVideoRenderer,
+ kInitPrerolling,
+ kSeeking,
+ kStarting,
+ kStarted,
+ kStopping,
+ kStopped,
+ };
+
+ // Updates |state_|. All state transitions should use this call.
+ void SetState(State next_state);
+
+ static const char* GetStateString(State state);
+ State GetNextState() const;
+
+  // Helper method that runs & resets |seek_cb_|.
+ void FinishSeek();
+
+ // DataSourceHost (by way of DemuxerHost) implementation.
+ virtual void SetTotalBytes(int64 total_bytes) OVERRIDE;
+ virtual void AddBufferedByteRange(int64 start, int64 end) OVERRIDE;
+ virtual void AddBufferedTimeRange(base::TimeDelta start,
+ base::TimeDelta end) OVERRIDE;
+
+  // DemuxerHost implementation.
+ virtual void SetDuration(base::TimeDelta duration) OVERRIDE;
+ virtual void OnDemuxerError(PipelineStatus error) OVERRIDE;
+
+ // Initiates teardown sequence in response to a runtime error.
+ //
+ // Safe to call from any thread.
+ void SetError(PipelineStatus error);
+
+ // Callback executed when the natural size of the video has changed.
+ void OnNaturalVideoSizeChanged(const gfx::Size& size);
+
+ // Callbacks executed when a renderer has ended.
+ void OnAudioRendererEnded();
+ void OnVideoRendererEnded();
+
+ // Callback executed by filters to update statistics.
+ void OnUpdateStatistics(const PipelineStatistics& stats);
+
+ // Callback executed by audio renderer when it has been disabled.
+ void OnAudioDisabled();
+
+ // Callback executed by audio renderer to update clock time.
+ void OnAudioTimeUpdate(base::TimeDelta time, base::TimeDelta max_time);
+
+ // Callback executed by video renderer to update clock time.
+ void OnVideoTimeUpdate(base::TimeDelta max_time);
+
+ // The following "task" methods correspond to the public methods, but these
+ // methods are run as the result of posting a task to the PipelineInternal's
+ // message loop.
+ void StartTask(scoped_ptr<FilterCollection> filter_collection,
+ const base::Closure& ended_cb,
+ const PipelineStatusCB& error_cb,
+ const PipelineStatusCB& seek_cb,
+ const BufferingStateCB& buffering_state_cb,
+ const base::Closure& duration_change_cb);
+
+ // Stops and destroys all filters, placing the pipeline in the kStopped state.
+ void StopTask(const base::Closure& stop_cb);
+
+ // Carries out stopping and destroying all filters, placing the pipeline in
+ // the kStopped state.
+ void ErrorChangedTask(PipelineStatus error);
+
+ // Carries out notifying filters that the playback rate has changed.
+ void PlaybackRateChangedTask(float playback_rate);
+
+ // Carries out notifying filters that the volume has changed.
+ void VolumeChangedTask(float volume);
+
+ // Carries out notifying filters that we are seeking to a new timestamp.
+ void SeekTask(base::TimeDelta time, const PipelineStatusCB& seek_cb);
+
+ // Handles audio/video ended logic and running |ended_cb_|.
+ void DoAudioRendererEnded();
+ void DoVideoRendererEnded();
+ void RunEndedCallbackIfNeeded();
+
+ // Carries out disabling the audio renderer.
+ void AudioDisabledTask();
+
+ // Kicks off initialization for each media object, executing |done_cb| with
+ // the result when completed.
+ void InitializeDemuxer(const PipelineStatusCB& done_cb);
+ void InitializeAudioRenderer(const PipelineStatusCB& done_cb);
+ void InitializeVideoRenderer(const PipelineStatusCB& done_cb);
+
+ // Kicks off destroying filters. Called by StopTask() and ErrorChangedTask().
+  // When we start to tear down the pipeline, there are two cases:
+  // 1. If the pipeline has not been initialized, we transition to the
+  //    stopping state first.
+  // 2. If the pipeline has been initialized, we first transition through the
+  //    pausing => flushing => stopping => stopped states.
+  // This removes the race condition between filters during stop.
+ void TearDownPipeline();
+
+ // Compute the time corresponding to a byte offset.
+ base::TimeDelta TimeForByteOffset_Locked(int64 byte_offset) const;
+
+ void OnStateTransition(PipelineStatus status);
+ void StateTransitionTask(PipelineStatus status);
+
+ // Initiates an asynchronous preroll call sequence executing |done_cb|
+ // with the final status when completed.
+ void DoInitialPreroll(const PipelineStatusCB& done_cb);
+
+ // Initiates an asynchronous pause-flush-seek-preroll call sequence
+ // executing |done_cb| with the final status when completed.
+ //
+ // TODO(scherkus): Prerolling should be separate from seeking so we can report
+ // finer grained ready states (HAVE_CURRENT_DATA vs. HAVE_FUTURE_DATA)
+  // independent of seeking.
+ void DoSeek(base::TimeDelta seek_timestamp, const PipelineStatusCB& done_cb);
+
+ // Updates playback rate and volume and initiates an asynchronous play call
+ // sequence executing |done_cb| with the final status when completed.
+ void DoPlay(const PipelineStatusCB& done_cb);
+
+ // Initiates an asynchronous pause-flush-stop call sequence executing
+ // |done_cb| when completed.
+ void DoStop(const PipelineStatusCB& done_cb);
+ void OnStopCompleted(PipelineStatus status);
+
+ void OnAudioUnderflow();
+
+ void StartClockIfWaitingForTimeUpdate_Locked();
+
+ // Message loop used to execute pipeline tasks.
+ scoped_refptr<base::MessageLoopProxy> message_loop_;
+
+ // MediaLog to which to log events.
+ scoped_refptr<MediaLog> media_log_;
+
+ // Lock used to serialize access for the following data members.
+ mutable base::Lock lock_;
+
+ // Whether or not the pipeline is running.
+ bool running_;
+
+ // Amount of available buffered data. Set by filters.
+ Ranges<int64> buffered_byte_ranges_;
+ Ranges<base::TimeDelta> buffered_time_ranges_;
+
+  // True when AddBufferedByteRange() or AddBufferedTimeRange() has been
+  // called more recently than DidLoadingProgress().
+ mutable bool did_loading_progress_;
+
+ // Total size of the media. Set by filters.
+ int64 total_bytes_;
+
+ // Video's natural width and height. Set by filters.
+ gfx::Size natural_size_;
+
+ // Current volume level (from 0.0f to 1.0f). This value is set immediately
+ // via SetVolume() and a task is dispatched on the message loop to notify the
+ // filters.
+ float volume_;
+
+ // Current playback rate (>= 0.0f). This value is set immediately via
+ // SetPlaybackRate() and a task is dispatched on the message loop to notify
+ // the filters.
+ float playback_rate_;
+
+ // base::TickClock used by |clock_|.
+ base::DefaultTickClock default_tick_clock_;
+
+ // Reference clock. Keeps track of current playback time. Uses system
+ // clock and linear interpolation, but can have its time manually set
+ // by filters.
+ scoped_ptr<Clock> clock_;
+
+ // If this value is set to true, then |clock_| is paused and we are waiting
+ // for an update of the clock greater than or equal to the elapsed time to
+ // start the clock.
+ bool waiting_for_clock_update_;
+
+ // Status of the pipeline. Initialized to PIPELINE_OK which indicates that
+ // the pipeline is operating correctly. Any other value indicates that the
+ // pipeline is stopped or is stopping. Clients can call the Stop() method to
+ // reset the pipeline state, and restore this to PIPELINE_OK.
+ PipelineStatus status_;
+
+ // Whether the media contains rendered audio and video streams.
+ // TODO(fischman,scherkus): replace these with checks for
+ // {audio,video}_decoder_ once extraction of {Audio,Video}Decoder from the
+  // Filter hierarchy is done.
+ bool has_audio_;
+ bool has_video_;
+
+ // The following data members are only accessed by tasks posted to
+ // |message_loop_|.
+
+ // Member that tracks the current state.
+ State state_;
+
+ // Whether we've received the audio/video ended events.
+ bool audio_ended_;
+ bool video_ended_;
+
+  // Set to true in AudioDisabledTask().
+ bool audio_disabled_;
+
+ // Temporary callback used for Start() and Seek().
+ PipelineStatusCB seek_cb_;
+
+ // Temporary callback used for Stop().
+ base::Closure stop_cb_;
+
+ // Permanent callbacks passed in via Start().
+ base::Closure ended_cb_;
+ PipelineStatusCB error_cb_;
+ BufferingStateCB buffering_state_cb_;
+ base::Closure duration_change_cb_;
+
+ // Contains the demuxer and renderers to use when initializing.
+ scoped_ptr<FilterCollection> filter_collection_;
+
+ // Holds the initialized demuxer. Used for seeking. Owned by client.
+ Demuxer* demuxer_;
+
+ // Holds the initialized renderers. Used for setting the volume,
+ // playback rate, and determining when playback has finished.
+ scoped_ptr<AudioRenderer> audio_renderer_;
+ scoped_ptr<VideoRenderer> video_renderer_;
+
+ PipelineStatistics statistics_;
+
+ // Time of pipeline creation; is non-zero only until the pipeline first
+ // reaches "kStarted", at which point it is used & zeroed out.
+ base::TimeTicks creation_time_;
+
+ scoped_ptr<SerialRunner> pending_callbacks_;
+
+ base::ThreadChecker thread_checker_;
+
+ DISALLOW_COPY_AND_ASSIGN(Pipeline);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_PIPELINE_H_
diff --git a/chromium/media/base/pipeline_status.cc b/chromium/media/base/pipeline_status.cc
new file mode 100644
index 00000000000..6c08383cdc9
--- /dev/null
+++ b/chromium/media/base/pipeline_status.cc
@@ -0,0 +1,24 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/pipeline_status.h"
+
+#include "base/bind.h"
+#include "base/metrics/histogram.h"
+
+namespace media {
+
+static void ReportAndRun(const std::string& name,
+ const PipelineStatusCB& cb,
+ PipelineStatus status) {
+ UMA_HISTOGRAM_ENUMERATION(name, status, PIPELINE_STATUS_MAX);
+ cb.Run(status);
+}
+
+PipelineStatusCB CreateUMAReportingPipelineCB(const std::string& name,
+ const PipelineStatusCB& cb) {
+ return base::Bind(&ReportAndRun, name, cb);
+}
+
+} // namespace media
diff --git a/chromium/media/base/pipeline_status.h b/chromium/media/base/pipeline_status.h
new file mode 100644
index 00000000000..c208d01d583
--- /dev/null
+++ b/chromium/media/base/pipeline_status.h
@@ -0,0 +1,65 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_PIPELINE_STATUS_H_
+#define MEDIA_BASE_PIPELINE_STATUS_H_
+
+#include "base/callback.h"
+
+#include <string>
+
+namespace media {
+
+// Status states for pipeline. All codes except PIPELINE_OK indicate errors.
+// Logged to UMA, so never reuse a value, always add new/greater ones!
+// TODO(vrk/scherkus): Trim the unused status codes. (crbug.com/126070)
+enum PipelineStatus {
+ PIPELINE_OK = 0,
+ PIPELINE_ERROR_URL_NOT_FOUND = 1,
+ PIPELINE_ERROR_NETWORK = 2,
+ PIPELINE_ERROR_DECODE = 3,
+ PIPELINE_ERROR_DECRYPT = 4,
+ PIPELINE_ERROR_ABORT = 5,
+ PIPELINE_ERROR_INITIALIZATION_FAILED = 6,
+ PIPELINE_ERROR_COULD_NOT_RENDER = 8,
+ PIPELINE_ERROR_READ = 9,
+ PIPELINE_ERROR_OPERATION_PENDING = 10,
+ PIPELINE_ERROR_INVALID_STATE = 11,
+ // Demuxer related errors.
+ DEMUXER_ERROR_COULD_NOT_OPEN = 12,
+ DEMUXER_ERROR_COULD_NOT_PARSE = 13,
+ DEMUXER_ERROR_NO_SUPPORTED_STREAMS = 14,
+ // Decoder related errors.
+ DECODER_ERROR_NOT_SUPPORTED = 15,
+ PIPELINE_STATUS_MAX, // Must be greater than all other values logged.
+};
+
+typedef base::Callback<void(PipelineStatus)> PipelineStatusCB;
+
+// Wrap & return a callback around |cb| which reports its argument to UMA under
+// the requested |name|.
+PipelineStatusCB CreateUMAReportingPipelineCB(const std::string& name,
+ const PipelineStatusCB& cb);
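+//
+// Illustrative usage (a sketch; the histogram name and |seek_cb| are
+// hypothetical, not part of this header):
+//   PipelineStatusCB wrapped =
+//       CreateUMAReportingPipelineCB("Media.PipelineSeek", seek_cb);
+//   wrapped.Run(PIPELINE_OK);  // Logs to UMA, then runs |seek_cb|.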
+
+// TODO(scherkus): this should be moved alongside host interface definitions.
+struct PipelineStatistics {
+ PipelineStatistics()
+ : audio_bytes_decoded(0),
+ video_bytes_decoded(0),
+ video_frames_decoded(0),
+ video_frames_dropped(0) {
+ }
+
+ uint32 audio_bytes_decoded; // Should be uint64?
+ uint32 video_bytes_decoded; // Should be uint64?
+ uint32 video_frames_decoded;
+ uint32 video_frames_dropped;
+};
+
+// Used for updating pipeline statistics.
+typedef base::Callback<void(const PipelineStatistics&)> StatisticsCB;
+
+} // namespace media
+
+#endif // MEDIA_BASE_PIPELINE_STATUS_H_
diff --git a/chromium/media/base/pipeline_unittest.cc b/chromium/media/base/pipeline_unittest.cc
new file mode 100644
index 00000000000..90c616f92db
--- /dev/null
+++ b/chromium/media/base/pipeline_unittest.cc
@@ -0,0 +1,1179 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <vector>
+
+#include "base/bind.h"
+#include "base/message_loop/message_loop.h"
+#include "base/stl_util.h"
+#include "base/test/simple_test_tick_clock.h"
+#include "base/threading/simple_thread.h"
+#include "base/time/clock.h"
+#include "media/base/clock.h"
+#include "media/base/gmock_callback_support.h"
+#include "media/base/media_log.h"
+#include "media/base/mock_filters.h"
+#include "media/base/pipeline.h"
+#include "media/base/test_helpers.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gfx/size.h"
+
+using ::testing::_;
+using ::testing::DeleteArg;
+using ::testing::DoAll;
+// TODO(scherkus): Remove InSequence after refactoring Pipeline.
+using ::testing::InSequence;
+using ::testing::Invoke;
+using ::testing::InvokeWithoutArgs;
+using ::testing::Mock;
+using ::testing::NotNull;
+using ::testing::Return;
+using ::testing::SaveArg;
+using ::testing::StrictMock;
+using ::testing::WithArg;
+
+namespace media {
+
+// Demuxer properties.
+static const int kTotalBytes = 1024;
+static const int kBitrate = 1234;
+
+ACTION_P(SetDemuxerProperties, duration) {
+ arg0->SetTotalBytes(kTotalBytes);
+ arg0->SetDuration(duration);
+}
+
+ACTION_P2(Stop, pipeline, stop_cb) {
+ pipeline->Stop(stop_cb);
+}
+
+ACTION_P2(SetError, pipeline, status) {
+ pipeline->SetErrorForTesting(status);
+}
+
+// Used for setting expectations on pipeline callbacks. Using a StrictMock
+// also lets us test for missing callbacks.
+class CallbackHelper {
+ public:
+ CallbackHelper() {}
+ virtual ~CallbackHelper() {}
+
+ MOCK_METHOD1(OnStart, void(PipelineStatus));
+ MOCK_METHOD1(OnSeek, void(PipelineStatus));
+ MOCK_METHOD0(OnStop, void());
+ MOCK_METHOD0(OnEnded, void());
+ MOCK_METHOD1(OnError, void(PipelineStatus));
+ MOCK_METHOD1(OnBufferingState, void(Pipeline::BufferingState));
+ MOCK_METHOD0(OnDurationChange, void());
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(CallbackHelper);
+};
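+
+// A StrictMock fails the test if any callback fires without a matching
+// EXPECT_CALL (illustrative sketch, not part of the fixture below):
+//   StrictMock<CallbackHelper> callbacks;
+//   EXPECT_CALL(callbacks, OnStop());
+//   callbacks.OnStop();   // Matches the expectation above.
+//   callbacks.OnEnded();  // No expectation set; the test fails.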
+
+// TODO(scherkus): even though some filters are initialized on separate
+// threads these tests aren't flaky... why? It's because filters' Initialize()
+// is executed on |message_loop_| and the mock filters instantly call
+// InitializationComplete(), which keeps the pipeline humming along. If
+// the filters don't call InitializationComplete() immediately, or filter
+// initialization is moved to a separate thread, these tests will become flaky.
+class PipelineTest : public ::testing::Test {
+ public:
+ PipelineTest()
+ : pipeline_(new Pipeline(message_loop_.message_loop_proxy(),
+ new MediaLog())),
+ filter_collection_(new FilterCollection()),
+ demuxer_(new MockDemuxer()) {
+ filter_collection_->SetDemuxer(demuxer_.get());
+
+ video_renderer_ = new MockVideoRenderer();
+ scoped_ptr<VideoRenderer> video_renderer(video_renderer_);
+ filter_collection_->SetVideoRenderer(video_renderer.Pass());
+
+ audio_renderer_ = new MockAudioRenderer();
+ scoped_ptr<AudioRenderer> audio_renderer(audio_renderer_);
+ filter_collection_->SetAudioRenderer(audio_renderer.Pass());
+
+ // InitializeDemuxer() adds overriding expectations for expected non-NULL
+ // streams.
+ DemuxerStream* null_pointer = NULL;
+ EXPECT_CALL(*demuxer_, GetStream(_))
+ .WillRepeatedly(Return(null_pointer));
+
+ EXPECT_CALL(*demuxer_, GetStartTime())
+ .WillRepeatedly(Return(base::TimeDelta()));
+ }
+
+ virtual ~PipelineTest() {
+ if (!pipeline_ || !pipeline_->IsRunning())
+ return;
+
+ ExpectStop();
+
+ // Expect a stop callback if we were started.
+ EXPECT_CALL(callbacks_, OnStop());
+ pipeline_->Stop(base::Bind(&CallbackHelper::OnStop,
+ base::Unretained(&callbacks_)));
+ message_loop_.RunUntilIdle();
+ }
+
+ protected:
+ // Sets up expectations to allow the demuxer to initialize.
+ typedef std::vector<MockDemuxerStream*> MockDemuxerStreamVector;
+ void InitializeDemuxer(MockDemuxerStreamVector* streams,
+ const base::TimeDelta& duration) {
+ EXPECT_CALL(callbacks_, OnDurationChange());
+ EXPECT_CALL(*demuxer_, Initialize(_, _))
+ .WillOnce(DoAll(SetDemuxerProperties(duration),
+ RunCallback<1>(PIPELINE_OK)));
+
+ // Configure the demuxer to return the streams.
+ for (size_t i = 0; i < streams->size(); ++i) {
+ DemuxerStream* stream = (*streams)[i];
+ EXPECT_CALL(*demuxer_, GetStream(stream->type()))
+ .WillRepeatedly(Return(stream));
+ }
+ }
+
+ void InitializeDemuxer(MockDemuxerStreamVector* streams) {
+ // Initialize with a default non-zero duration.
+ InitializeDemuxer(streams, base::TimeDelta::FromSeconds(10));
+ }
+
+ scoped_ptr<StrictMock<MockDemuxerStream> > CreateStream(
+ DemuxerStream::Type type) {
+ scoped_ptr<StrictMock<MockDemuxerStream> > stream(
+ new StrictMock<MockDemuxerStream>(type));
+ return stream.Pass();
+ }
+
+ // Sets up expectations to allow the video renderer to initialize.
+ void InitializeVideoRenderer(DemuxerStream* stream) {
+ EXPECT_CALL(*video_renderer_, Initialize(stream, _, _, _, _, _, _, _, _))
+ .WillOnce(RunCallback<1>(PIPELINE_OK));
+ EXPECT_CALL(*video_renderer_, SetPlaybackRate(0.0f));
+
+ // Startup sequence.
+ EXPECT_CALL(*video_renderer_, Preroll(demuxer_->GetStartTime(), _))
+ .WillOnce(RunCallback<1>(PIPELINE_OK));
+ EXPECT_CALL(*video_renderer_, Play(_))
+ .WillOnce(RunClosure<0>());
+ }
+
+ // Sets up expectations to allow the audio renderer to initialize.
+ void InitializeAudioRenderer(DemuxerStream* stream,
+ bool disable_after_init_cb) {
+ if (disable_after_init_cb) {
+ EXPECT_CALL(*audio_renderer_, Initialize(stream, _, _, _, _, _, _, _))
+ .WillOnce(DoAll(RunCallback<1>(PIPELINE_OK),
+ WithArg<6>(RunClosure<0>()))); // |disabled_cb|.
+ } else {
+ EXPECT_CALL(*audio_renderer_, Initialize(stream, _, _, _, _, _, _, _))
+ .WillOnce(DoAll(SaveArg<4>(&audio_time_cb_),
+ RunCallback<1>(PIPELINE_OK)));
+ }
+ }
+
+ // Sets up expectations on the callback and initializes the pipeline. Called
+ // after tests have set expectations on any filters they wish to use.
+ void InitializePipeline(PipelineStatus start_status) {
+ EXPECT_CALL(callbacks_, OnStart(start_status));
+
+ if (start_status == PIPELINE_OK) {
+ EXPECT_CALL(callbacks_, OnBufferingState(Pipeline::kHaveMetadata));
+ EXPECT_CALL(*demuxer_, SetPlaybackRate(0.0f));
+
+ if (audio_stream_) {
+ EXPECT_CALL(*audio_renderer_, SetPlaybackRate(0.0f));
+ EXPECT_CALL(*audio_renderer_, SetVolume(1.0f));
+
+ // Startup sequence.
+ EXPECT_CALL(*audio_renderer_, Preroll(base::TimeDelta(), _))
+ .WillOnce(RunCallback<1>(PIPELINE_OK));
+ EXPECT_CALL(*audio_renderer_, Play(_))
+ .WillOnce(RunClosure<0>());
+ }
+ EXPECT_CALL(callbacks_, OnBufferingState(Pipeline::kPrerollCompleted));
+ }
+
+ pipeline_->Start(
+ filter_collection_.Pass(),
+ base::Bind(&CallbackHelper::OnEnded, base::Unretained(&callbacks_)),
+ base::Bind(&CallbackHelper::OnError, base::Unretained(&callbacks_)),
+ base::Bind(&CallbackHelper::OnStart, base::Unretained(&callbacks_)),
+ base::Bind(&CallbackHelper::OnBufferingState,
+ base::Unretained(&callbacks_)),
+ base::Bind(&CallbackHelper::OnDurationChange,
+ base::Unretained(&callbacks_)));
+ message_loop_.RunUntilIdle();
+ }
+
+ void CreateAudioStream() {
+ audio_stream_ = CreateStream(DemuxerStream::AUDIO);
+ }
+
+ void CreateVideoStream() {
+ video_stream_ = CreateStream(DemuxerStream::VIDEO);
+ video_stream_->set_video_decoder_config(video_decoder_config_);
+ }
+
+ MockDemuxerStream* audio_stream() {
+ return audio_stream_.get();
+ }
+
+ MockDemuxerStream* video_stream() {
+ return video_stream_.get();
+ }
+
+ void ExpectSeek(const base::TimeDelta& seek_time) {
+ // Every filter should receive a call to Seek().
+ EXPECT_CALL(*demuxer_, Seek(seek_time, _))
+ .WillOnce(RunCallback<1>(PIPELINE_OK));
+ EXPECT_CALL(*demuxer_, SetPlaybackRate(_));
+
+ if (audio_stream_) {
+ EXPECT_CALL(*audio_renderer_, Pause(_))
+ .WillOnce(RunClosure<0>());
+ EXPECT_CALL(*audio_renderer_, Flush(_))
+ .WillOnce(RunClosure<0>());
+ EXPECT_CALL(*audio_renderer_, Preroll(seek_time, _))
+ .WillOnce(RunCallback<1>(PIPELINE_OK));
+ EXPECT_CALL(*audio_renderer_, SetPlaybackRate(_));
+ EXPECT_CALL(*audio_renderer_, SetVolume(_));
+ EXPECT_CALL(*audio_renderer_, Play(_))
+ .WillOnce(RunClosure<0>());
+ }
+
+ if (video_stream_) {
+ EXPECT_CALL(*video_renderer_, Pause(_))
+ .WillOnce(RunClosure<0>());
+ EXPECT_CALL(*video_renderer_, Flush(_))
+ .WillOnce(RunClosure<0>());
+ EXPECT_CALL(*video_renderer_, Preroll(seek_time, _))
+ .WillOnce(RunCallback<1>(PIPELINE_OK));
+ EXPECT_CALL(*video_renderer_, SetPlaybackRate(_));
+ EXPECT_CALL(*video_renderer_, Play(_))
+ .WillOnce(RunClosure<0>());
+ }
+
+ EXPECT_CALL(callbacks_, OnBufferingState(Pipeline::kPrerollCompleted));
+
+ // We expect a successful seek callback.
+ EXPECT_CALL(callbacks_, OnSeek(PIPELINE_OK));
+ }
+
+ void DoSeek(const base::TimeDelta& seek_time) {
+ pipeline_->Seek(seek_time,
+ base::Bind(&CallbackHelper::OnSeek,
+ base::Unretained(&callbacks_)));
+
+ // We expect the time to be updated only after the seek has completed.
+ EXPECT_NE(seek_time, pipeline_->GetMediaTime());
+ message_loop_.RunUntilIdle();
+ EXPECT_EQ(seek_time, pipeline_->GetMediaTime());
+ }
+
+ void ExpectStop() {
+ if (demuxer_)
+ EXPECT_CALL(*demuxer_, Stop(_)).WillOnce(RunClosure<0>());
+
+ if (audio_stream_)
+ EXPECT_CALL(*audio_renderer_, Stop(_)).WillOnce(RunClosure<0>());
+
+ if (video_stream_)
+ EXPECT_CALL(*video_renderer_, Stop(_)).WillOnce(RunClosure<0>());
+ }
+
+ // Fixture members.
+ StrictMock<CallbackHelper> callbacks_;
+ base::SimpleTestTickClock test_tick_clock_;
+ base::MessageLoop message_loop_;
+ scoped_ptr<Pipeline> pipeline_;
+
+ scoped_ptr<FilterCollection> filter_collection_;
+ scoped_ptr<MockDemuxer> demuxer_;
+ MockVideoRenderer* video_renderer_;
+ MockAudioRenderer* audio_renderer_;
+ scoped_ptr<StrictMock<MockDemuxerStream> > audio_stream_;
+ scoped_ptr<StrictMock<MockDemuxerStream> > video_stream_;
+ AudioRenderer::TimeCB audio_time_cb_;
+ VideoDecoderConfig video_decoder_config_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(PipelineTest);
+};
+
+// Test that playback control methods no-op when the pipeline hasn't been
+// started.
+TEST_F(PipelineTest, NotStarted) {
+ const base::TimeDelta kZero;
+
+ EXPECT_FALSE(pipeline_->IsRunning());
+ EXPECT_FALSE(pipeline_->HasAudio());
+ EXPECT_FALSE(pipeline_->HasVideo());
+
+ // Setting the playback rate should still work.
+ EXPECT_EQ(0.0f, pipeline_->GetPlaybackRate());
+ pipeline_->SetPlaybackRate(-1.0f);
+ EXPECT_EQ(0.0f, pipeline_->GetPlaybackRate());
+ pipeline_->SetPlaybackRate(1.0f);
+ EXPECT_EQ(1.0f, pipeline_->GetPlaybackRate());
+
+ // Setting the volume should still work.
+ EXPECT_EQ(1.0f, pipeline_->GetVolume());
+ pipeline_->SetVolume(-1.0f);
+ EXPECT_EQ(1.0f, pipeline_->GetVolume());
+ pipeline_->SetVolume(0.0f);
+ EXPECT_EQ(0.0f, pipeline_->GetVolume());
+
+ EXPECT_TRUE(kZero == pipeline_->GetMediaTime());
+ EXPECT_EQ(0u, pipeline_->GetBufferedTimeRanges().size());
+ EXPECT_TRUE(kZero == pipeline_->GetMediaDuration());
+
+ EXPECT_EQ(0, pipeline_->GetTotalBytes());
+
+ // Should always get set to zero.
+ gfx::Size size(1, 1);
+ pipeline_->GetNaturalVideoSize(&size);
+ EXPECT_EQ(0, size.width());
+ EXPECT_EQ(0, size.height());
+}
+
+TEST_F(PipelineTest, NeverInitializes) {
+ // Don't execute the callback passed into Initialize().
+ EXPECT_CALL(*demuxer_, Initialize(_, _));
+
+ // This test hangs during initialization by never calling
+ // InitializationComplete(). StrictMock<> will ensure that the callback is
+ // never executed.
+ pipeline_->Start(
+ filter_collection_.Pass(),
+ base::Bind(&CallbackHelper::OnEnded, base::Unretained(&callbacks_)),
+ base::Bind(&CallbackHelper::OnError, base::Unretained(&callbacks_)),
+ base::Bind(&CallbackHelper::OnStart, base::Unretained(&callbacks_)),
+ base::Bind(&CallbackHelper::OnBufferingState,
+ base::Unretained(&callbacks_)),
+ base::Bind(&CallbackHelper::OnDurationChange,
+ base::Unretained(&callbacks_)));
+ message_loop_.RunUntilIdle();
+
+ // Because our callback will get executed when the test tears down, we'll
+ // verify that nothing has been called, then set our expectation for the call
+ // made during tear down.
+ Mock::VerifyAndClear(&callbacks_);
+ EXPECT_CALL(callbacks_, OnStart(PIPELINE_OK));
+}
+
+TEST_F(PipelineTest, URLNotFound) {
+ EXPECT_CALL(*demuxer_, Initialize(_, _))
+ .WillOnce(RunCallback<1>(PIPELINE_ERROR_URL_NOT_FOUND));
+ EXPECT_CALL(*demuxer_, Stop(_))
+ .WillOnce(RunClosure<0>());
+
+ InitializePipeline(PIPELINE_ERROR_URL_NOT_FOUND);
+}
+
+TEST_F(PipelineTest, NoStreams) {
+ EXPECT_CALL(*demuxer_, Initialize(_, _))
+ .WillOnce(RunCallback<1>(PIPELINE_OK));
+ EXPECT_CALL(*demuxer_, Stop(_))
+ .WillOnce(RunClosure<0>());
+
+ InitializePipeline(PIPELINE_ERROR_COULD_NOT_RENDER);
+}
+
+TEST_F(PipelineTest, AudioStream) {
+ CreateAudioStream();
+ MockDemuxerStreamVector streams;
+ streams.push_back(audio_stream());
+
+ InitializeDemuxer(&streams);
+ InitializeAudioRenderer(audio_stream(), false);
+
+ InitializePipeline(PIPELINE_OK);
+ EXPECT_TRUE(pipeline_->HasAudio());
+ EXPECT_FALSE(pipeline_->HasVideo());
+}
+
+TEST_F(PipelineTest, VideoStream) {
+ CreateVideoStream();
+ MockDemuxerStreamVector streams;
+ streams.push_back(video_stream());
+
+ InitializeDemuxer(&streams);
+ InitializeVideoRenderer(video_stream());
+
+ InitializePipeline(PIPELINE_OK);
+ EXPECT_FALSE(pipeline_->HasAudio());
+ EXPECT_TRUE(pipeline_->HasVideo());
+}
+
+TEST_F(PipelineTest, AudioVideoStream) {
+ CreateAudioStream();
+ CreateVideoStream();
+ MockDemuxerStreamVector streams;
+ streams.push_back(audio_stream());
+ streams.push_back(video_stream());
+
+ InitializeDemuxer(&streams);
+ InitializeAudioRenderer(audio_stream(), false);
+ InitializeVideoRenderer(video_stream());
+
+ InitializePipeline(PIPELINE_OK);
+ EXPECT_TRUE(pipeline_->HasAudio());
+ EXPECT_TRUE(pipeline_->HasVideo());
+}
+
+TEST_F(PipelineTest, Seek) {
+ CreateAudioStream();
+ CreateVideoStream();
+ MockDemuxerStreamVector streams;
+ streams.push_back(audio_stream());
+ streams.push_back(video_stream());
+
+ InitializeDemuxer(&streams, base::TimeDelta::FromSeconds(3000));
+ InitializeAudioRenderer(audio_stream(), false);
+ InitializeVideoRenderer(video_stream());
+
+ // Initialize then seek!
+ InitializePipeline(PIPELINE_OK);
+
+ // Every filter should receive a call to Seek().
+ base::TimeDelta expected = base::TimeDelta::FromSeconds(2000);
+ ExpectSeek(expected);
+ DoSeek(expected);
+}
+
+TEST_F(PipelineTest, SetVolume) {
+ CreateAudioStream();
+ MockDemuxerStreamVector streams;
+ streams.push_back(audio_stream());
+
+ InitializeDemuxer(&streams);
+ InitializeAudioRenderer(audio_stream(), false);
+
+ // The audio renderer should receive a call to SetVolume().
+ float expected = 0.5f;
+ EXPECT_CALL(*audio_renderer_, SetVolume(expected));
+
+ // Initialize then set volume!
+ InitializePipeline(PIPELINE_OK);
+ pipeline_->SetVolume(expected);
+}
+
+TEST_F(PipelineTest, Properties) {
+ CreateVideoStream();
+ MockDemuxerStreamVector streams;
+ streams.push_back(video_stream());
+
+ const base::TimeDelta kDuration = base::TimeDelta::FromSeconds(100);
+ InitializeDemuxer(&streams, kDuration);
+ InitializeVideoRenderer(video_stream());
+
+ InitializePipeline(PIPELINE_OK);
+ EXPECT_EQ(kDuration.ToInternalValue(),
+ pipeline_->GetMediaDuration().ToInternalValue());
+ EXPECT_EQ(kTotalBytes, pipeline_->GetTotalBytes());
+ EXPECT_FALSE(pipeline_->DidLoadingProgress());
+}
+
+TEST_F(PipelineTest, GetBufferedTimeRanges) {
+ CreateVideoStream();
+ MockDemuxerStreamVector streams;
+ streams.push_back(video_stream());
+
+ const base::TimeDelta kDuration = base::TimeDelta::FromSeconds(100);
+ InitializeDemuxer(&streams, kDuration);
+ InitializeVideoRenderer(video_stream());
+
+ InitializePipeline(PIPELINE_OK);
+
+ EXPECT_EQ(0u, pipeline_->GetBufferedTimeRanges().size());
+
+ EXPECT_FALSE(pipeline_->DidLoadingProgress());
+ pipeline_->AddBufferedByteRange(0, kTotalBytes / 8);
+ EXPECT_TRUE(pipeline_->DidLoadingProgress());
+ EXPECT_FALSE(pipeline_->DidLoadingProgress());
+ EXPECT_EQ(1u, pipeline_->GetBufferedTimeRanges().size());
+ EXPECT_EQ(base::TimeDelta(), pipeline_->GetBufferedTimeRanges().start(0));
+ EXPECT_EQ(kDuration / 8, pipeline_->GetBufferedTimeRanges().end(0));
+ pipeline_->AddBufferedTimeRange(base::TimeDelta(), kDuration / 8);
+ EXPECT_EQ(base::TimeDelta(), pipeline_->GetBufferedTimeRanges().start(0));
+ EXPECT_EQ(kDuration / 8, pipeline_->GetBufferedTimeRanges().end(0));
+
+ base::TimeDelta kSeekTime = kDuration / 2;
+ ExpectSeek(kSeekTime);
+ DoSeek(kSeekTime);
+
+ EXPECT_TRUE(pipeline_->DidLoadingProgress());
+ EXPECT_FALSE(pipeline_->DidLoadingProgress());
+ pipeline_->AddBufferedByteRange(kTotalBytes / 2,
+ kTotalBytes / 2 + kTotalBytes / 8);
+ EXPECT_TRUE(pipeline_->DidLoadingProgress());
+ EXPECT_FALSE(pipeline_->DidLoadingProgress());
+ EXPECT_EQ(2u, pipeline_->GetBufferedTimeRanges().size());
+ EXPECT_EQ(base::TimeDelta(), pipeline_->GetBufferedTimeRanges().start(0));
+ EXPECT_EQ(kDuration / 8, pipeline_->GetBufferedTimeRanges().end(0));
+ EXPECT_EQ(kDuration / 2, pipeline_->GetBufferedTimeRanges().start(1));
+ EXPECT_EQ(kDuration / 2 + kDuration / 8,
+ pipeline_->GetBufferedTimeRanges().end(1));
+
+ pipeline_->AddBufferedTimeRange(kDuration / 4, 3 * kDuration / 8);
+ EXPECT_EQ(base::TimeDelta(), pipeline_->GetBufferedTimeRanges().start(0));
+ EXPECT_EQ(kDuration / 8, pipeline_->GetBufferedTimeRanges().end(0));
+ EXPECT_EQ(kDuration / 4, pipeline_->GetBufferedTimeRanges().start(1));
+ EXPECT_EQ(3 * kDuration / 8, pipeline_->GetBufferedTimeRanges().end(1));
+ EXPECT_EQ(kDuration / 2, pipeline_->GetBufferedTimeRanges().start(2));
+ EXPECT_EQ(kDuration / 2 + kDuration / 8,
+ pipeline_->GetBufferedTimeRanges().end(2));
+}
+
+TEST_F(PipelineTest, DisableAudioRenderer) {
+ CreateAudioStream();
+ CreateVideoStream();
+ MockDemuxerStreamVector streams;
+ streams.push_back(audio_stream());
+ streams.push_back(video_stream());
+
+ InitializeDemuxer(&streams);
+ InitializeAudioRenderer(audio_stream(), false);
+ InitializeVideoRenderer(video_stream());
+
+ InitializePipeline(PIPELINE_OK);
+ EXPECT_TRUE(pipeline_->HasAudio());
+ EXPECT_TRUE(pipeline_->HasVideo());
+
+ EXPECT_CALL(*demuxer_, OnAudioRendererDisabled());
+ pipeline_->OnAudioDisabled();
+
+ // Verify that ended event is fired when video ends.
+ EXPECT_CALL(callbacks_, OnEnded());
+ pipeline_->OnVideoRendererEnded();
+}
+
+TEST_F(PipelineTest, DisableAudioRendererDuringInit) {
+ CreateAudioStream();
+ CreateVideoStream();
+ MockDemuxerStreamVector streams;
+ streams.push_back(audio_stream());
+ streams.push_back(video_stream());
+
+ InitializeDemuxer(&streams);
+ InitializeAudioRenderer(audio_stream(), true);
+ InitializeVideoRenderer(video_stream());
+
+ EXPECT_CALL(*demuxer_, OnAudioRendererDisabled());
+
+ InitializePipeline(PIPELINE_OK);
+ EXPECT_FALSE(pipeline_->HasAudio());
+ EXPECT_TRUE(pipeline_->HasVideo());
+
+ // Verify that ended event is fired when video ends.
+ EXPECT_CALL(callbacks_, OnEnded());
+ pipeline_->OnVideoRendererEnded();
+}
+
+TEST_F(PipelineTest, EndedCallback) {
+ CreateAudioStream();
+ CreateVideoStream();
+ MockDemuxerStreamVector streams;
+ streams.push_back(audio_stream());
+ streams.push_back(video_stream());
+
+ InitializeDemuxer(&streams);
+ InitializeAudioRenderer(audio_stream(), false);
+ InitializeVideoRenderer(video_stream());
+ InitializePipeline(PIPELINE_OK);
+
+ // The ended callback shouldn't run until both renderers have ended.
+ pipeline_->OnAudioRendererEnded();
+ message_loop_.RunUntilIdle();
+
+ EXPECT_CALL(callbacks_, OnEnded());
+ pipeline_->OnVideoRendererEnded();
+ message_loop_.RunUntilIdle();
+}
+
+TEST_F(PipelineTest, AudioStreamShorterThanVideo) {
+ base::TimeDelta duration = base::TimeDelta::FromSeconds(10);
+
+ CreateAudioStream();
+ CreateVideoStream();
+ MockDemuxerStreamVector streams;
+ streams.push_back(audio_stream());
+ streams.push_back(video_stream());
+
+ // Replace the clock so we can simulate wallclock time advancing w/o using
+ // Sleep().
+ pipeline_->SetClockForTesting(new Clock(&test_tick_clock_));
+
+ InitializeDemuxer(&streams, duration);
+ InitializeAudioRenderer(audio_stream(), false);
+ InitializeVideoRenderer(video_stream());
+ InitializePipeline(PIPELINE_OK);
+
+ EXPECT_EQ(0, pipeline_->GetMediaTime().ToInternalValue());
+
+ float playback_rate = 1.0f;
+ EXPECT_CALL(*demuxer_, SetPlaybackRate(playback_rate));
+ EXPECT_CALL(*video_renderer_, SetPlaybackRate(playback_rate));
+ EXPECT_CALL(*audio_renderer_, SetPlaybackRate(playback_rate));
+ pipeline_->SetPlaybackRate(playback_rate);
+ message_loop_.RunUntilIdle();
+
+ InSequence s;
+
+ // Verify that the clock doesn't advance since it hasn't been started by
+ // a time update from the audio stream.
+ int64 start_time = pipeline_->GetMediaTime().ToInternalValue();
+ test_tick_clock_.Advance(base::TimeDelta::FromMilliseconds(100));
+ EXPECT_EQ(pipeline_->GetMediaTime().ToInternalValue(), start_time);
+
+ // Signal end of audio stream.
+ pipeline_->OnAudioRendererEnded();
+ message_loop_.RunUntilIdle();
+
+ // Verify that the clock advances.
+ start_time = pipeline_->GetMediaTime().ToInternalValue();
+ test_tick_clock_.Advance(base::TimeDelta::FromMilliseconds(100));
+ EXPECT_GT(pipeline_->GetMediaTime().ToInternalValue(), start_time);
+
+ // Signal end of video stream and make sure OnEnded() callback occurs.
+ EXPECT_CALL(callbacks_, OnEnded());
+ pipeline_->OnVideoRendererEnded();
+}
+
+TEST_F(PipelineTest, ErrorDuringSeek) {
+ CreateAudioStream();
+ MockDemuxerStreamVector streams;
+ streams.push_back(audio_stream());
+
+ InitializeDemuxer(&streams);
+ InitializeAudioRenderer(audio_stream(), false);
+ InitializePipeline(PIPELINE_OK);
+
+ float playback_rate = 1.0f;
+ EXPECT_CALL(*demuxer_, SetPlaybackRate(playback_rate));
+ EXPECT_CALL(*audio_renderer_, SetPlaybackRate(playback_rate));
+ pipeline_->SetPlaybackRate(playback_rate);
+ message_loop_.RunUntilIdle();
+
+ base::TimeDelta seek_time = base::TimeDelta::FromSeconds(5);
+
+ // Preroll() isn't called as the demuxer errors out first.
+ EXPECT_CALL(*audio_renderer_, Pause(_))
+ .WillOnce(RunClosure<0>());
+ EXPECT_CALL(*audio_renderer_, Flush(_))
+ .WillOnce(RunClosure<0>());
+ EXPECT_CALL(*audio_renderer_, Stop(_))
+ .WillOnce(RunClosure<0>());
+
+ EXPECT_CALL(*demuxer_, Seek(seek_time, _))
+ .WillOnce(RunCallback<1>(PIPELINE_ERROR_READ));
+ EXPECT_CALL(*demuxer_, Stop(_))
+ .WillOnce(RunClosure<0>());
+
+ pipeline_->Seek(seek_time, base::Bind(&CallbackHelper::OnSeek,
+ base::Unretained(&callbacks_)));
+ EXPECT_CALL(callbacks_, OnSeek(PIPELINE_ERROR_READ));
+ message_loop_.RunUntilIdle();
+}
+
+// Invoked as the pipeline's error callback. This asserts that the pipeline
+// does not enqueue non-teardown related tasks while tearing down.
+static void TestNoCallsAfterError(
+ Pipeline* pipeline, base::MessageLoop* message_loop,
+ PipelineStatus /* status */) {
+ CHECK(pipeline);
+ CHECK(message_loop);
+
+ // When we get to this stage, the message loop should be empty.
+ EXPECT_TRUE(message_loop->IsIdleForTesting());
+
+ // Make calls on pipeline after error has occurred.
+ pipeline->SetPlaybackRate(0.5f);
+ pipeline->SetVolume(0.5f);
+
+ // No additional tasks should be queued as a result of these calls.
+ EXPECT_TRUE(message_loop->IsIdleForTesting());
+}
+
+TEST_F(PipelineTest, NoMessageDuringTearDownFromError) {
+ CreateAudioStream();
+ MockDemuxerStreamVector streams;
+ streams.push_back(audio_stream());
+
+ InitializeDemuxer(&streams);
+ InitializeAudioRenderer(audio_stream(), false);
+ InitializePipeline(PIPELINE_OK);
+
+ // Trigger additional requests on the pipeline during tear down from error.
+ base::Callback<void(PipelineStatus)> cb = base::Bind(
+ &TestNoCallsAfterError, pipeline_.get(), &message_loop_);
+ ON_CALL(callbacks_, OnError(_))
+ .WillByDefault(Invoke(&cb, &base::Callback<void(PipelineStatus)>::Run));
+
+ base::TimeDelta seek_time = base::TimeDelta::FromSeconds(5);
+
+ // Seek() isn't called as the demuxer errors out first.
+ EXPECT_CALL(*audio_renderer_, Pause(_))
+ .WillOnce(RunClosure<0>());
+ EXPECT_CALL(*audio_renderer_, Flush(_))
+ .WillOnce(RunClosure<0>());
+ EXPECT_CALL(*audio_renderer_, Stop(_))
+ .WillOnce(RunClosure<0>());
+
+ EXPECT_CALL(*demuxer_, Seek(seek_time, _))
+ .WillOnce(RunCallback<1>(PIPELINE_ERROR_READ));
+ EXPECT_CALL(*demuxer_, Stop(_))
+ .WillOnce(RunClosure<0>());
+
+ pipeline_->Seek(seek_time, base::Bind(&CallbackHelper::OnSeek,
+ base::Unretained(&callbacks_)));
+ EXPECT_CALL(callbacks_, OnSeek(PIPELINE_ERROR_READ));
+ message_loop_.RunUntilIdle();
+}
+
+TEST_F(PipelineTest, StartTimeIsZero) {
+ CreateVideoStream();
+ MockDemuxerStreamVector streams;
+ streams.push_back(video_stream());
+
+ const base::TimeDelta kDuration = base::TimeDelta::FromSeconds(100);
+ InitializeDemuxer(&streams, kDuration);
+ InitializeVideoRenderer(video_stream());
+
+ InitializePipeline(PIPELINE_OK);
+ EXPECT_FALSE(pipeline_->HasAudio());
+ EXPECT_TRUE(pipeline_->HasVideo());
+
+ EXPECT_EQ(base::TimeDelta(), pipeline_->GetMediaTime());
+}
+
+TEST_F(PipelineTest, StartTimeIsNonZero) {
+ const base::TimeDelta kStartTime = base::TimeDelta::FromSeconds(4);
+ const base::TimeDelta kDuration = base::TimeDelta::FromSeconds(100);
+
+ EXPECT_CALL(*demuxer_, GetStartTime())
+ .WillRepeatedly(Return(kStartTime));
+
+ CreateVideoStream();
+ MockDemuxerStreamVector streams;
+ streams.push_back(video_stream());
+
+ InitializeDemuxer(&streams, kDuration);
+ InitializeVideoRenderer(video_stream());
+
+ InitializePipeline(PIPELINE_OK);
+ EXPECT_FALSE(pipeline_->HasAudio());
+ EXPECT_TRUE(pipeline_->HasVideo());
+
+ EXPECT_EQ(kStartTime, pipeline_->GetMediaTime());
+}
+
+static void RunTimeCB(const AudioRenderer::TimeCB& time_cb,
+ int time_in_ms,
+ int max_time_in_ms) {
+ time_cb.Run(base::TimeDelta::FromMilliseconds(time_in_ms),
+ base::TimeDelta::FromMilliseconds(max_time_in_ms));
+}
+
+TEST_F(PipelineTest, AudioTimeUpdateDuringSeek) {
+ CreateAudioStream();
+ MockDemuxerStreamVector streams;
+ streams.push_back(audio_stream());
+
+ InitializeDemuxer(&streams);
+ InitializeAudioRenderer(audio_stream(), false);
+ InitializePipeline(PIPELINE_OK);
+
+ float playback_rate = 1.0f;
+ EXPECT_CALL(*demuxer_, SetPlaybackRate(playback_rate));
+ EXPECT_CALL(*audio_renderer_, SetPlaybackRate(playback_rate));
+ pipeline_->SetPlaybackRate(playback_rate);
+ message_loop_.RunUntilIdle();
+
+ // Provide an initial time update so that the pipeline transitions out of the
+ // "waiting for time update" state.
+ audio_time_cb_.Run(base::TimeDelta::FromMilliseconds(100),
+ base::TimeDelta::FromMilliseconds(500));
+
+ base::TimeDelta seek_time = base::TimeDelta::FromSeconds(5);
+
+ // Arrange to trigger a time update while the demuxer is in the middle of
+ // seeking. This update should be ignored by the pipeline and the clock should
+ // not get updated.
+ base::Closure closure = base::Bind(&RunTimeCB, audio_time_cb_, 300, 700);
+ EXPECT_CALL(*demuxer_, Seek(seek_time, _))
+ .WillOnce(DoAll(InvokeWithoutArgs(&closure, &base::Closure::Run),
+ RunCallback<1>(PIPELINE_OK)));
+
+ EXPECT_CALL(*audio_renderer_, Pause(_))
+ .WillOnce(RunClosure<0>());
+ EXPECT_CALL(*audio_renderer_, Flush(_))
+ .WillOnce(RunClosure<0>());
+ EXPECT_CALL(*audio_renderer_, Preroll(seek_time, _))
+ .WillOnce(RunCallback<1>(PIPELINE_OK));
+ EXPECT_CALL(*demuxer_, SetPlaybackRate(_));
+ EXPECT_CALL(*audio_renderer_, SetPlaybackRate(_));
+ EXPECT_CALL(*audio_renderer_, SetVolume(_));
+ EXPECT_CALL(*audio_renderer_, Play(_))
+ .WillOnce(RunClosure<0>());
+
+ EXPECT_CALL(callbacks_, OnBufferingState(Pipeline::kPrerollCompleted));
+ EXPECT_CALL(callbacks_, OnSeek(PIPELINE_OK));
+ DoSeek(seek_time);
+
+ EXPECT_EQ(pipeline_->GetMediaTime(), seek_time);
+
+ // Now that the seek is complete, verify that time updates advance the current
+ // time.
+ base::TimeDelta new_time = seek_time + base::TimeDelta::FromMilliseconds(100);
+ audio_time_cb_.Run(new_time, new_time);
+
+ EXPECT_EQ(pipeline_->GetMediaTime(), new_time);
+}
+
+static void DeletePipeline(scoped_ptr<Pipeline> pipeline) {
+ // |pipeline| will go out of scope.
+}
+
+TEST_F(PipelineTest, DeleteAfterStop) {
+ CreateAudioStream();
+ MockDemuxerStreamVector streams;
+ streams.push_back(audio_stream());
+ InitializeDemuxer(&streams);
+ InitializeAudioRenderer(audio_stream(), false);
+ InitializePipeline(PIPELINE_OK);
+
+ ExpectStop();
+
+ Pipeline* pipeline = pipeline_.get();
+ pipeline->Stop(base::Bind(&DeletePipeline, base::Passed(&pipeline_)));
+ message_loop_.RunUntilIdle();
+}
+
+class PipelineTeardownTest : public PipelineTest {
+ public:
+ enum TeardownState {
+ kInitDemuxer,
+ kInitAudioRenderer,
+ kInitVideoRenderer,
+ kPausing,
+ kFlushing,
+ kSeeking,
+ kPrerolling,
+ kStarting,
+ kPlaying,
+ };
+
+ enum StopOrError {
+ kStop,
+ kError,
+ };
+
+ PipelineTeardownTest() {}
+ virtual ~PipelineTeardownTest() {}
+
+ void RunTest(TeardownState state, StopOrError stop_or_error) {
+ switch (state) {
+ case kInitDemuxer:
+ case kInitAudioRenderer:
+ case kInitVideoRenderer:
+ DoInitialize(state, stop_or_error);
+ break;
+
+ case kPausing:
+ case kFlushing:
+ case kSeeking:
+ case kPrerolling:
+ case kStarting:
+ DoInitialize(state, stop_or_error);
+ DoSeek(state, stop_or_error);
+ break;
+
+ case kPlaying:
+ DoInitialize(state, stop_or_error);
+ DoStopOrError(stop_or_error);
+ break;
+ }
+ }
+
+ private:
+ // TODO(scherkus): We do radically different things depending on whether
+ // teardown is invoked via stop vs error. The teardown path should be the
+ // same, see http://crbug.com/110228
+ void DoInitialize(TeardownState state, StopOrError stop_or_error) {
+ PipelineStatus expected_status =
+ SetInitializeExpectations(state, stop_or_error);
+
+ EXPECT_CALL(callbacks_, OnStart(expected_status));
+ pipeline_->Start(
+ filter_collection_.Pass(),
+ base::Bind(&CallbackHelper::OnEnded, base::Unretained(&callbacks_)),
+ base::Bind(&CallbackHelper::OnError, base::Unretained(&callbacks_)),
+ base::Bind(&CallbackHelper::OnStart, base::Unretained(&callbacks_)),
+ base::Bind(&CallbackHelper::OnBufferingState,
+ base::Unretained(&callbacks_)),
+ base::Bind(&CallbackHelper::OnDurationChange,
+ base::Unretained(&callbacks_)));
+ message_loop_.RunUntilIdle();
+ }
+
+ PipelineStatus SetInitializeExpectations(TeardownState state,
+ StopOrError stop_or_error) {
+ PipelineStatus status = PIPELINE_OK;
+ base::Closure stop_cb = base::Bind(
+ &CallbackHelper::OnStop, base::Unretained(&callbacks_));
+
+ if (state == kInitDemuxer) {
+ if (stop_or_error == kStop) {
+ EXPECT_CALL(*demuxer_, Initialize(_, _))
+ .WillOnce(DoAll(Stop(pipeline_.get(), stop_cb),
+ RunCallback<1>(PIPELINE_OK)));
+ EXPECT_CALL(callbacks_, OnStop());
+ } else {
+ status = DEMUXER_ERROR_COULD_NOT_OPEN;
+ EXPECT_CALL(*demuxer_, Initialize(_, _))
+ .WillOnce(RunCallback<1>(status));
+ }
+
+ EXPECT_CALL(*demuxer_, Stop(_)).WillOnce(RunClosure<0>());
+ return status;
+ }
+
+ CreateAudioStream();
+ CreateVideoStream();
+ MockDemuxerStreamVector streams;
+ streams.push_back(audio_stream());
+ streams.push_back(video_stream());
+ InitializeDemuxer(&streams, base::TimeDelta::FromSeconds(3000));
+
+ if (state == kInitAudioRenderer) {
+ if (stop_or_error == kStop) {
+ EXPECT_CALL(*audio_renderer_, Initialize(_, _, _, _, _, _, _, _))
+ .WillOnce(DoAll(Stop(pipeline_.get(), stop_cb),
+ RunCallback<1>(PIPELINE_OK)));
+ EXPECT_CALL(callbacks_, OnStop());
+ } else {
+ status = PIPELINE_ERROR_INITIALIZATION_FAILED;
+ EXPECT_CALL(*audio_renderer_, Initialize(_, _, _, _, _, _, _, _))
+ .WillOnce(RunCallback<1>(status));
+ }
+
+ EXPECT_CALL(*demuxer_, Stop(_)).WillOnce(RunClosure<0>());
+ EXPECT_CALL(*audio_renderer_, Stop(_)).WillOnce(RunClosure<0>());
+ return status;
+ }
+
+ EXPECT_CALL(*audio_renderer_, Initialize(_, _, _, _, _, _, _, _))
+ .WillOnce(RunCallback<1>(PIPELINE_OK));
+
+ if (state == kInitVideoRenderer) {
+ if (stop_or_error == kStop) {
+ EXPECT_CALL(*video_renderer_, Initialize(_, _, _, _, _, _, _, _, _))
+ .WillOnce(DoAll(Stop(pipeline_.get(), stop_cb),
+ RunCallback<1>(PIPELINE_OK)));
+ EXPECT_CALL(callbacks_, OnStop());
+ } else {
+ status = PIPELINE_ERROR_INITIALIZATION_FAILED;
+ EXPECT_CALL(*video_renderer_, Initialize(_, _, _, _, _, _, _, _, _))
+ .WillOnce(RunCallback<1>(status));
+ }
+
+ EXPECT_CALL(*demuxer_, Stop(_)).WillOnce(RunClosure<0>());
+ EXPECT_CALL(*audio_renderer_, Stop(_)).WillOnce(RunClosure<0>());
+ EXPECT_CALL(*video_renderer_, Stop(_)).WillOnce(RunClosure<0>());
+ return status;
+ }
+
+ EXPECT_CALL(*video_renderer_, Initialize(_, _, _, _, _, _, _, _, _))
+ .WillOnce(RunCallback<1>(PIPELINE_OK));
+
+ EXPECT_CALL(callbacks_, OnBufferingState(Pipeline::kHaveMetadata));
+
+ // If we get here it's a successful initialization.
+ EXPECT_CALL(*audio_renderer_, Preroll(base::TimeDelta(), _))
+ .WillOnce(RunCallback<1>(PIPELINE_OK));
+ EXPECT_CALL(*video_renderer_, Preroll(base::TimeDelta(), _))
+ .WillOnce(RunCallback<1>(PIPELINE_OK));
+
+ EXPECT_CALL(*demuxer_, SetPlaybackRate(0.0f));
+ EXPECT_CALL(*audio_renderer_, SetPlaybackRate(0.0f));
+ EXPECT_CALL(*video_renderer_, SetPlaybackRate(0.0f));
+ EXPECT_CALL(*audio_renderer_, SetVolume(1.0f));
+
+ EXPECT_CALL(*audio_renderer_, Play(_))
+ .WillOnce(RunClosure<0>());
+ EXPECT_CALL(*video_renderer_, Play(_))
+ .WillOnce(RunClosure<0>());
+
+ if (status == PIPELINE_OK)
+ EXPECT_CALL(callbacks_, OnBufferingState(Pipeline::kPrerollCompleted));
+
+ return status;
+ }
+
+ void DoSeek(TeardownState state, StopOrError stop_or_error) {
+ InSequence s;
+ PipelineStatus status = SetSeekExpectations(state, stop_or_error);
+
+ EXPECT_CALL(*demuxer_, Stop(_)).WillOnce(RunClosure<0>());
+ EXPECT_CALL(*audio_renderer_, Stop(_)).WillOnce(RunClosure<0>());
+ EXPECT_CALL(*video_renderer_, Stop(_)).WillOnce(RunClosure<0>());
+ EXPECT_CALL(callbacks_, OnSeek(status));
+
+ if (status == PIPELINE_OK) {
+ EXPECT_CALL(callbacks_, OnStop());
+ }
+
+ pipeline_->Seek(base::TimeDelta::FromSeconds(10), base::Bind(
+ &CallbackHelper::OnSeek, base::Unretained(&callbacks_)));
+ message_loop_.RunUntilIdle();
+ }
+
+ PipelineStatus SetSeekExpectations(TeardownState state,
+ StopOrError stop_or_error) {
+ PipelineStatus status = PIPELINE_OK;
+ base::Closure stop_cb = base::Bind(
+ &CallbackHelper::OnStop, base::Unretained(&callbacks_));
+
+ if (state == kPausing) {
+ if (stop_or_error == kStop) {
+ EXPECT_CALL(*audio_renderer_, Pause(_))
+ .WillOnce(DoAll(Stop(pipeline_.get(), stop_cb), RunClosure<0>()));
+ } else {
+ status = PIPELINE_ERROR_READ;
+ EXPECT_CALL(*audio_renderer_, Pause(_)).WillOnce(
+ DoAll(SetError(pipeline_.get(), status), RunClosure<0>()));
+ }
+
+ return status;
+ }
+
+ EXPECT_CALL(*audio_renderer_, Pause(_)).WillOnce(RunClosure<0>());
+ EXPECT_CALL(*video_renderer_, Pause(_)).WillOnce(RunClosure<0>());
+
+ if (state == kFlushing) {
+ if (stop_or_error == kStop) {
+ EXPECT_CALL(*audio_renderer_, Flush(_))
+ .WillOnce(DoAll(Stop(pipeline_.get(), stop_cb), RunClosure<0>()));
+ } else {
+ status = PIPELINE_ERROR_READ;
+ EXPECT_CALL(*audio_renderer_, Flush(_)).WillOnce(
+ DoAll(SetError(pipeline_.get(), status), RunClosure<0>()));
+ }
+
+ return status;
+ }
+
+ EXPECT_CALL(*audio_renderer_, Flush(_)).WillOnce(RunClosure<0>());
+ EXPECT_CALL(*video_renderer_, Flush(_)).WillOnce(RunClosure<0>());
+
+ if (state == kSeeking) {
+ if (stop_or_error == kStop) {
+ EXPECT_CALL(*demuxer_, Seek(_, _))
+ .WillOnce(DoAll(Stop(pipeline_.get(), stop_cb),
+ RunCallback<1>(PIPELINE_OK)));
+ } else {
+ status = PIPELINE_ERROR_READ;
+ EXPECT_CALL(*demuxer_, Seek(_, _))
+ .WillOnce(RunCallback<1>(status));
+ }
+
+ return status;
+ }
+
+ EXPECT_CALL(*demuxer_, Seek(_, _))
+ .WillOnce(RunCallback<1>(PIPELINE_OK));
+
+ if (state == kPrerolling) {
+ if (stop_or_error == kStop) {
+ EXPECT_CALL(*audio_renderer_, Preroll(_, _))
+ .WillOnce(DoAll(Stop(pipeline_.get(), stop_cb),
+ RunCallback<1>(PIPELINE_OK)));
+ } else {
+ status = PIPELINE_ERROR_READ;
+ EXPECT_CALL(*audio_renderer_, Preroll(_, _))
+ .WillOnce(RunCallback<1>(status));
+ }
+
+ return status;
+ }
+
+ EXPECT_CALL(*audio_renderer_, Preroll(_, _))
+ .WillOnce(RunCallback<1>(PIPELINE_OK));
+ EXPECT_CALL(*video_renderer_, Preroll(_, _))
+ .WillOnce(RunCallback<1>(PIPELINE_OK));
+
+ // Playback rate and volume are updated prior to starting.
+ EXPECT_CALL(*demuxer_, SetPlaybackRate(0.0f));
+ EXPECT_CALL(*audio_renderer_, SetPlaybackRate(0.0f));
+ EXPECT_CALL(*video_renderer_, SetPlaybackRate(0.0f));
+ EXPECT_CALL(*audio_renderer_, SetVolume(1.0f));
+
+ if (state == kStarting) {
+ if (stop_or_error == kStop) {
+ EXPECT_CALL(*audio_renderer_, Play(_))
+ .WillOnce(DoAll(Stop(pipeline_.get(), stop_cb), RunClosure<0>()));
+ } else {
+ status = PIPELINE_ERROR_READ;
+ EXPECT_CALL(*audio_renderer_, Play(_)).WillOnce(
+ DoAll(SetError(pipeline_.get(), status), RunClosure<0>()));
+ }
+ return status;
+ }
+
+ NOTREACHED() << "State not supported: " << state;
+ return status;
+ }
+
+ void DoStopOrError(StopOrError stop_or_error) {
+ InSequence s;
+
+ EXPECT_CALL(*demuxer_, Stop(_)).WillOnce(RunClosure<0>());
+ EXPECT_CALL(*audio_renderer_, Stop(_)).WillOnce(RunClosure<0>());
+ EXPECT_CALL(*video_renderer_, Stop(_)).WillOnce(RunClosure<0>());
+
+ if (stop_or_error == kStop) {
+ EXPECT_CALL(callbacks_, OnStop());
+ pipeline_->Stop(base::Bind(
+ &CallbackHelper::OnStop, base::Unretained(&callbacks_)));
+ } else {
+ EXPECT_CALL(callbacks_, OnError(PIPELINE_ERROR_READ));
+ pipeline_->SetErrorForTesting(PIPELINE_ERROR_READ);
+ }
+
+ message_loop_.RunUntilIdle();
+ }
+
+ DISALLOW_COPY_AND_ASSIGN(PipelineTeardownTest);
+};
+
+#define INSTANTIATE_TEARDOWN_TEST(stop_or_error, state) \
+ TEST_F(PipelineTeardownTest, stop_or_error##_##state) { \
+ RunTest(k##state, k##stop_or_error); \
+ }
+
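+// For example, INSTANTIATE_TEARDOWN_TEST(Stop, InitDemuxer) expands to:
+//   TEST_F(PipelineTeardownTest, Stop_InitDemuxer) {
+//     RunTest(kInitDemuxer, kStop);
+//   }
+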
+INSTANTIATE_TEARDOWN_TEST(Stop, InitDemuxer);
+INSTANTIATE_TEARDOWN_TEST(Stop, InitAudioRenderer);
+INSTANTIATE_TEARDOWN_TEST(Stop, InitVideoRenderer);
+INSTANTIATE_TEARDOWN_TEST(Stop, Pausing);
+INSTANTIATE_TEARDOWN_TEST(Stop, Flushing);
+INSTANTIATE_TEARDOWN_TEST(Stop, Seeking);
+INSTANTIATE_TEARDOWN_TEST(Stop, Prerolling);
+INSTANTIATE_TEARDOWN_TEST(Stop, Starting);
+INSTANTIATE_TEARDOWN_TEST(Stop, Playing);
+
+INSTANTIATE_TEARDOWN_TEST(Error, InitDemuxer);
+INSTANTIATE_TEARDOWN_TEST(Error, InitAudioRenderer);
+INSTANTIATE_TEARDOWN_TEST(Error, InitVideoRenderer);
+INSTANTIATE_TEARDOWN_TEST(Error, Pausing);
+INSTANTIATE_TEARDOWN_TEST(Error, Flushing);
+INSTANTIATE_TEARDOWN_TEST(Error, Seeking);
+INSTANTIATE_TEARDOWN_TEST(Error, Prerolling);
+INSTANTIATE_TEARDOWN_TEST(Error, Starting);
+INSTANTIATE_TEARDOWN_TEST(Error, Playing);
+
+} // namespace media
diff --git a/chromium/media/base/ranges.cc b/chromium/media/base/ranges.cc
new file mode 100644
index 00000000000..b7b2b55adb8
--- /dev/null
+++ b/chromium/media/base/ranges.cc
@@ -0,0 +1,15 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/ranges.h"
+
+namespace media {
+
+template<>
+void Ranges<base::TimeDelta>::DCheckLT(const base::TimeDelta& lhs,
+ const base::TimeDelta& rhs) const {
+ DCHECK(lhs < rhs) << lhs.ToInternalValue() << " < " << rhs.ToInternalValue();
+}
+
+} // namespace media
diff --git a/chromium/media/base/ranges.h b/chromium/media/base/ranges.h
new file mode 100644
index 00000000000..917f30e141e
--- /dev/null
+++ b/chromium/media/base/ranges.h
@@ -0,0 +1,162 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_RANGES_H_
+#define MEDIA_BASE_RANGES_H_
+
+#include <algorithm>
+#include <ostream>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/logging.h"
+#include "base/time/time.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+// Ranges allows holding an ordered list of ranges of [start,end) intervals.
+// The canonical example use-case is holding the list of ranges of buffered
+// bytes or times in a <video> tag.
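+//
+// Illustrative usage (a sketch; the values are arbitrary):
+//   Ranges<int> buffered;
+//   buffered.Add(0, 10);   // { [0,10) }
+//   buffered.Add(20, 30);  // { [0,10) [20,30) }
+//   buffered.Add(5, 25);   // Overlaps both ranges; coalesces to { [0,30) }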
+template<class T> // Endpoint type; typically a base::TimeDelta or an int64.
+class Ranges {
+ public:
+ // Allow copy & assign.
+
+ // Add [start,end) to this object, coalescing overlaps as appropriate.
+ // Returns the number of stored ranges, post coalescing.
+ size_t Add(T start, T end);
+
+ // Return the number of disjoint ranges.
+ size_t size() const;
+
+ // Return the "i"'th range's start & end (0-based).
+ T start(int i) const;
+ T end(int i) const;
+
+ // Clear all ranges.
+ void clear();
+
+ // Computes the intersection between this range and |other|.
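+ // For example, intersecting { [0,5) [10,15) } with { [3,12) } yields
+ // { [3,5) [10,12) }.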
+ Ranges<T> IntersectionWith(const Ranges<T>& other) const;
+
+ private:
+ // Wrapper around DCHECK_LT allowing comparisons of operator<<'able T's.
+ void DCheckLT(const T& lhs, const T& rhs) const;
+
+ // Disjoint, in increasing order of start.
+ std::vector<std::pair<T, T> > ranges_;
+};
+
+//////////////////////////////////////////////////////////////////////
+// EVERYTHING BELOW HERE IS IMPLEMENTATION DETAIL!!
+//////////////////////////////////////////////////////////////////////
+
+template<class T>
+size_t Ranges<T>::Add(T start, T end) {
+ if (start == end) // Nothing to be done with empty ranges.
+ return ranges_.size();
+
+ DCheckLT(start, end);
+ size_t i;
+ // Walk along the array of ranges until |start| is no longer larger than the
+ // current interval's end.
+ for (i = 0; i < ranges_.size() && ranges_[i].second < start; ++i) {
+ // Empty body
+ }
+
+ // Now we know |start| belongs in the i'th slot.
+ // If i is the end of the range, append new range and done.
+ if (i == ranges_.size()) {
+ ranges_.push_back(std::make_pair(start, end));
+ return ranges_.size();
+ }
+
+ // If |end| is less than i->first, then [start,end) is a new (non-overlapping)
+ // i'th entry pushing everyone else back, and done.
+ if (end < ranges_[i].first) {
+ ranges_.insert(ranges_.begin() + i, std::make_pair(start, end));
+ return ranges_.size();
+ }
+
+ // Easy cases done. Getting here means there is overlap between [start,end)
+ // and the existing ranges.
+
+ // Now: start <= i->second && i->first <= end
+ if (start < ranges_[i].first)
+ ranges_[i].first = start;
+ if (ranges_[i].second < end)
+ ranges_[i].second = end;
+
+ // Now: [start,end) is contained in the i'th range, and we'd be done, except
+ // for the fact that the newly-extended i'th range might now overlap
+ // subsequent ranges. Merge until discontinuities appear. Note that there's
+ // no need to test/merge previous ranges, since needing that would mean the
+ // original loop went too far.
+ while ((i + 1) < ranges_.size() &&
+ ranges_[i + 1].first <= ranges_[i].second) {
+ ranges_[i].second = std::max(ranges_[i].second, ranges_[i + 1].second);
+ ranges_.erase(ranges_.begin() + i + 1);
+ }
+
+ return ranges_.size();
+}
+
+template<>
+MEDIA_EXPORT void
+ Ranges<base::TimeDelta>::DCheckLT(const base::TimeDelta& lhs,
+ const base::TimeDelta& rhs) const;
+
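+// base::TimeDelta lacks operator<<, so DCHECK_LT() in the generic version
+// below would not compile for it; the specialization above (defined in
+// ranges.cc) uses DCHECK(lhs < rhs) and logs ToInternalValue() instead.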
+template<class T>
+void Ranges<T>::DCheckLT(const T& lhs, const T& rhs) const {
+ DCHECK_LT(lhs, rhs);
+}
+
+template<class T>
+size_t Ranges<T>::size() const {
+ return ranges_.size();
+}
+
+template<class T>
+T Ranges<T>::start(int i) const {
+ return ranges_[i].first;
+}
+
+template<class T>
+T Ranges<T>::end(int i) const {
+ return ranges_[i].second;
+}
+
+template<class T>
+void Ranges<T>::clear() {
+ ranges_.clear();
+}
+
+template<class T>
+Ranges<T> Ranges<T>::IntersectionWith(const Ranges<T>& other) const {
+ Ranges<T> result;
+
+ size_t i = 0;
+ size_t j = 0;
+
+ while (i < size() && j < other.size()) {
+ T max_start = std::max(start(i), other.start(j));
+ T min_end = std::min(end(i), other.end(j));
+
+ // Add an intersection range to the result if the ranges overlap.
+ if (max_start < min_end)
+ result.Add(max_start, min_end);
+
+ if (end(i) < other.end(j))
+ ++i;
+ else
+ ++j;
+ }
+
+ return result;
+}
+
+} // namespace media
+
+#endif // MEDIA_BASE_RANGES_H_
diff --git a/chromium/media/base/ranges_unittest.cc b/chromium/media/base/ranges_unittest.cc
new file mode 100644
index 00000000000..ad9f0d31a47
--- /dev/null
+++ b/chromium/media/base/ranges_unittest.cc
@@ -0,0 +1,151 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/ranges.h"
+
+#include <sstream>
+
+#include "base/strings/string_piece.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+// Human-readable output operator, for debugging/testability.
+template<class T>
+std::ostream& operator<<(std::ostream& os, const Ranges<T>& r) {
+ os << "{ ";
+ for (size_t i = 0; i < r.size(); ++i)
+ os << "[" << r.start(i) << "," << r.end(i) << ") ";
+ os << "}";
+ return os;
+}
+
+// Helper method for asserting stringified form of |r| matches expectation.
+template<class T>
+static void ExpectRanges(const Ranges<T>& r,
+ const base::StringPiece& expected_string) {
+ std::stringstream ss;
+ ss << r;
+ ASSERT_EQ(ss.str(), expected_string);
+}
+
+#define ASSERT_RANGES(ranges, expectation) \
+ ASSERT_NO_FATAL_FAILURE(ExpectRanges(ranges, expectation));
+
+TEST(RangesTest, SimpleTests) {
+ Ranges<int> r;
+ ASSERT_EQ(r.size(), 0u) << r;
+ ASSERT_EQ(r.Add(0, 1), 1u) << r;
+ ASSERT_EQ(r.size(), 1u) << r;
+ ASSERT_RANGES(r, "{ [0,1) }");
+ ASSERT_EQ(r.Add(2, 3), 2u) << r;
+ ASSERT_RANGES(r, "{ [0,1) [2,3) }");
+ ASSERT_EQ(r.Add(1, 2), 1u) << r;
+ ASSERT_RANGES(r, "{ [0,3) }");
+ ASSERT_EQ(r.Add(1, 4), 1u) << r;
+ ASSERT_RANGES(r, "{ [0,4) }");
+ ASSERT_EQ(r.Add(7, 9), 2u) << r;
+ ASSERT_EQ(r.Add(5, 6), 3u) << r;
+ ASSERT_RANGES(r, "{ [0,4) [5,6) [7,9) }");
+ ASSERT_EQ(r.Add(6, 7), 2u) << r;
+ ASSERT_RANGES(r, "{ [0,4) [5,9) }");
+}
+
+TEST(RangesTest, ExtendRange) {
+ Ranges<double> r;
+ ASSERT_EQ(r.Add(0, 1), 1u) << r;
+ ASSERT_EQ(r.Add(0.5, 1.5), 1u) << r;
+ ASSERT_RANGES(r, "{ [0,1.5) }");
+
+ r.clear();
+ ASSERT_EQ(r.Add(0, 1), 1u) << r;
+ ASSERT_EQ(r.Add(-0.5, 0.5), 1u) << r;
+ ASSERT_RANGES(r, "{ [-0.5,1) }");
+
+ r.clear();
+ ASSERT_EQ(r.Add(0, 1), 1u) << r;
+ ASSERT_EQ(r.Add(2, 3), 2u) << r;
+ ASSERT_EQ(r.Add(4, 5), 3u) << r;
+ ASSERT_EQ(r.Add(0.5, 1.5), 3u) << r;
+ ASSERT_RANGES(r, "{ [0,1.5) [2,3) [4,5) }");
+
+ r.clear();
+ ASSERT_EQ(r.Add(0, 1), 1u) << r;
+ ASSERT_EQ(r.Add(2, 3), 2u) << r;
+ ASSERT_EQ(r.Add(4, 5), 3u) << r;
+ ASSERT_EQ(r.Add(1.5, 2.5), 3u) << r;
+ ASSERT_RANGES(r, "{ [0,1) [1.5,3) [4,5) }");
+}
+
+TEST(RangesTest, CoalesceRanges) {
+ Ranges<double> r;
+ ASSERT_EQ(r.Add(0, 1), 1u) << r;
+ ASSERT_EQ(r.Add(2, 3), 2u) << r;
+ ASSERT_EQ(r.Add(4, 5), 3u) << r;
+ ASSERT_EQ(r.Add(0.5, 2.5), 2u) << r;
+ ASSERT_RANGES(r, "{ [0,3) [4,5) }");
+
+ r.clear();
+ ASSERT_EQ(r.Add(0, 1), 1u) << r;
+ ASSERT_EQ(r.Add(2, 3), 2u) << r;
+ ASSERT_EQ(r.Add(4, 5), 3u) << r;
+ ASSERT_EQ(r.Add(0.5, 4.5), 1u) << r;
+ ASSERT_RANGES(r, "{ [0,5) }");
+
+ r.clear();
+ ASSERT_EQ(r.Add(0, 1), 1u) << r;
+ ASSERT_EQ(r.Add(1, 2), 1u) << r;
+ ASSERT_RANGES(r, "{ [0,2) }");
+}
+
+TEST(RangesTest, IntersectionWith) {
+ Ranges<int> a;
+ Ranges<int> b;
+
+ ASSERT_EQ(a.Add(0, 1), 1u) << a;
+ ASSERT_EQ(a.Add(4, 7), 2u) << a;
+ ASSERT_EQ(a.Add(10, 12), 3u) << a;
+
+ // Test intersections with an empty range.
+ ASSERT_RANGES(a, "{ [0,1) [4,7) [10,12) }");
+ ASSERT_RANGES(b, "{ }");
+ ASSERT_RANGES(a.IntersectionWith(b), "{ }");
+ ASSERT_RANGES(b.IntersectionWith(a), "{ }");
+
+ // Test intersections with a completely overlapping range.
+ ASSERT_EQ(b.Add(-1, 13), 1u) << b;
+ ASSERT_RANGES(a, "{ [0,1) [4,7) [10,12) }");
+ ASSERT_RANGES(b, "{ [-1,13) }");
+ ASSERT_RANGES(a.IntersectionWith(b), "{ [0,1) [4,7) [10,12) }");
+ ASSERT_RANGES(b.IntersectionWith(a), "{ [0,1) [4,7) [10,12) }");
+
+ // Test intersections with disjoint ranges.
+ b.clear();
+ ASSERT_EQ(b.Add(1, 4), 1u) << b;
+ ASSERT_EQ(b.Add(8, 9), 2u) << b;
+ ASSERT_RANGES(a, "{ [0,1) [4,7) [10,12) }");
+ ASSERT_RANGES(b, "{ [1,4) [8,9) }");
+ ASSERT_RANGES(a.IntersectionWith(b), "{ }");
+ ASSERT_RANGES(b.IntersectionWith(a), "{ }");
+
+ // Test intersections with partially overlapping ranges.
+ b.clear();
+ ASSERT_EQ(b.Add(0, 3), 1u) << b;
+ ASSERT_EQ(b.Add(5, 11), 2u) << b;
+ ASSERT_RANGES(a, "{ [0,1) [4,7) [10,12) }");
+ ASSERT_RANGES(b, "{ [0,3) [5,11) }");
+ ASSERT_RANGES(a.IntersectionWith(b), "{ [0,1) [5,7) [10,11) }");
+ ASSERT_RANGES(b.IntersectionWith(a), "{ [0,1) [5,7) [10,11) }");
+
+ // Test intersection with a range that starts at the beginning of the
+ // first range and ends at the end of the last range.
+ b.clear();
+ ASSERT_EQ(b.Add(0, 12), 1u) << b;
+ ASSERT_RANGES(a, "{ [0,1) [4,7) [10,12) }");
+ ASSERT_RANGES(b, "{ [0,12) }");
+ ASSERT_RANGES(a.IntersectionWith(b), "{ [0,1) [4,7) [10,12) }");
+ ASSERT_RANGES(b.IntersectionWith(a), "{ [0,1) [4,7) [10,12) }");
+}
+
+} // namespace media
diff --git a/chromium/media/base/run_all_unittests.cc b/chromium/media/base/run_all_unittests.cc
new file mode 100644
index 00000000000..4274634d0b7
--- /dev/null
+++ b/chromium/media/base/run_all_unittests.cc
@@ -0,0 +1,46 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/command_line.h"
+#include "base/test/test_suite.h"
+#include "build/build_config.h"
+#include "media/base/media.h"
+#include "media/base/media_switches.h"
+
+#if defined(OS_ANDROID)
+#include "base/android/jni_android.h"
+#include "media/base/android/media_jni_registrar.h"
+#endif
+
+class TestSuiteNoAtExit : public base::TestSuite {
+ public:
+ TestSuiteNoAtExit(int argc, char** argv) : TestSuite(argc, argv) {}
+ virtual ~TestSuiteNoAtExit() {}
+ protected:
+ virtual void Initialize() OVERRIDE;
+};
+
+void TestSuiteNoAtExit::Initialize() {
+ // Run TestSuite::Initialize first so that logging is initialized.
+ base::TestSuite::Initialize();
+
+#if defined(OS_ANDROID)
+ // Register JNI bindings for android.
+ JNIEnv* env = base::android::AttachCurrentThread();
+ media::RegisterJni(env);
+#endif
+
+ // Run this here instead of main() to ensure an AtExitManager is already
+ // present.
+ media::InitializeMediaLibraryForTesting();
+ // Enable VP8 alpha support for all media tests.
+ // TODO(tomfinegan): Remove this once the VP8 alpha flag is removed or
+ // negated.
+ CommandLine* cmd_line = CommandLine::ForCurrentProcess();
+ cmd_line->AppendSwitch(switches::kEnableVp8AlphaPlayback);
+}
+
+int main(int argc, char** argv) {
+ return TestSuiteNoAtExit(argc, argv).Run();
+}
diff --git a/chromium/media/base/sample_format.cc b/chromium/media/base/sample_format.cc
new file mode 100644
index 00000000000..a4791cd6861
--- /dev/null
+++ b/chromium/media/base/sample_format.cc
@@ -0,0 +1,55 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/sample_format.h"
+
+#include "base/logging.h"
+
+namespace media {
+
+int SampleFormatToBytesPerChannel(SampleFormat sample_format) {
+ switch (sample_format) {
+ case kUnknownSampleFormat:
+ return 0;
+ case kSampleFormatU8:
+ return 1;
+ case kSampleFormatS16:
+ case kSampleFormatPlanarS16:
+ return 2;
+ case kSampleFormatS32:
+ case kSampleFormatF32:
+ case kSampleFormatPlanarF32:
+ return 4;
+ case kSampleFormatMax:
+ break;
+ }
+
+ NOTREACHED() << "Invalid sample format provided: " << sample_format;
+ return 0;
+}
+
+const char* SampleFormatToString(SampleFormat sample_format) {
+ switch (sample_format) {
+ case kUnknownSampleFormat:
+ return "Unknown sample format";
+ case kSampleFormatU8:
+ return "Unsigned 8-bit with bias of 128";
+ case kSampleFormatS16:
+ return "Signed 16-bit";
+ case kSampleFormatS32:
+ return "Signed 32-bit";
+ case kSampleFormatF32:
+ return "Float 32-bit";
+ case kSampleFormatPlanarS16:
+ return "Signed 16-bit planar";
+ case kSampleFormatPlanarF32:
+ return "Float 32-bit planar";
+ case kSampleFormatMax:
+ break;
+ }
+ NOTREACHED() << "Invalid sample format provided: " << sample_format;
+ return "";
+}
+
+} // namespace media
diff --git a/chromium/media/base/sample_format.h b/chromium/media/base/sample_format.h
new file mode 100644
index 00000000000..3d2799fa128
--- /dev/null
+++ b/chromium/media/base/sample_format.h
@@ -0,0 +1,37 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_SAMPLE_FORMAT_H_
+#define MEDIA_BASE_SAMPLE_FORMAT_H_
+
+#include "media/base/media_export.h"
+
+namespace media {
+
+enum SampleFormat {
+ // These values are histogrammed over time; do not change their ordinal
+ // values. When deleting a sample format replace it with a dummy value; when
+ // adding a sample format, do so at the bottom before kSampleFormatMax.
+ kUnknownSampleFormat = 0,
+ kSampleFormatU8, // Unsigned 8-bit w/ bias of 128.
+ kSampleFormatS16, // Signed 16-bit.
+ kSampleFormatS32, // Signed 32-bit.
+ kSampleFormatF32, // Float 32-bit.
+ kSampleFormatPlanarS16, // Signed 16-bit planar.
+ kSampleFormatPlanarF32, // Float 32-bit planar.
+
+ // Must always be last!
+ kSampleFormatMax
+};
+
+// Returns the number of bytes used per channel for the specified
+// |sample_format|.
+MEDIA_EXPORT int SampleFormatToBytesPerChannel(SampleFormat sample_format);
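+//
+// For example, SampleFormatToBytesPerChannel(kSampleFormatS16) returns 2, so
+// one frame of packed stereo S16 audio occupies 2 channels * 2 bytes = 4
+// bytes.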
+
+// Returns the name of the sample format as a string.
+MEDIA_EXPORT const char* SampleFormatToString(SampleFormat sample_format);
+
+} // namespace media
+
+#endif // MEDIA_BASE_SAMPLE_FORMAT_H_
diff --git a/chromium/media/base/scoped_histogram_timer.h b/chromium/media/base/scoped_histogram_timer.h
new file mode 100644
index 00000000000..a7f69b0ab9b
--- /dev/null
+++ b/chromium/media/base/scoped_histogram_timer.h
@@ -0,0 +1,32 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_SCOPED_HISTOGRAM_TIMER_H_
+#define MEDIA_BASE_SCOPED_HISTOGRAM_TIMER_H_
+
+#include "base/metrics/histogram.h"
+#include "base/time/time.h"
+
+// Scoped class which logs its time on this earth as a UMA statistic. Must be
+// a #define macro since the UMA macros do not accept variables as histogram
+// names. The nested macro is necessary to expand __COUNTER__ to an actual
+// value.
+#define SCOPED_UMA_HISTOGRAM_TIMER(name) \
+ SCOPED_UMA_HISTOGRAM_TIMER_EXPANDER(name, __COUNTER__)
+
+#define SCOPED_UMA_HISTOGRAM_TIMER_EXPANDER(name, key) \
+ SCOPED_UMA_HISTOGRAM_TIMER_UNIQUE(name, key)
+
+#define SCOPED_UMA_HISTOGRAM_TIMER_UNIQUE(name, key) \
+ class ScopedHistogramTimer##key { \
+ public: \
+ ScopedHistogramTimer##key() : constructed_(base::TimeTicks::Now()) {} \
+ ~ScopedHistogramTimer##key() { \
+ base::TimeDelta elapsed = base::TimeTicks::Now() - constructed_; \
+ UMA_HISTOGRAM_TIMES(name, elapsed); \
+ } \
+ private: \
+ base::TimeTicks constructed_; \
+ } scoped_histogram_timer_##key
+
+#endif // MEDIA_BASE_SCOPED_HISTOGRAM_TIMER_H_
diff --git a/chromium/media/base/scoped_histogram_timer_unittest.cc b/chromium/media/base/scoped_histogram_timer_unittest.cc
new file mode 100644
index 00000000000..b8893f9713b
--- /dev/null
+++ b/chromium/media/base/scoped_histogram_timer_unittest.cc
@@ -0,0 +1,16 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/time/time.h"
+#include "media/base/scoped_histogram_timer.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+TEST(ScopedHistogramTimer, TwoTimersOneScope) {
+ SCOPED_UMA_HISTOGRAM_TIMER("TestTimer0");
+ SCOPED_UMA_HISTOGRAM_TIMER("TestTimer1");
+}
+
+} // namespace media
\ No newline at end of file
diff --git a/chromium/media/base/seekable_buffer.cc b/chromium/media/base/seekable_buffer.cc
new file mode 100644
index 00000000000..019ae4651ca
--- /dev/null
+++ b/chromium/media/base/seekable_buffer.cc
@@ -0,0 +1,277 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/seekable_buffer.h"
+
+#include <algorithm>
+
+#include "base/logging.h"
+#include "media/base/data_buffer.h"
+
+namespace media {
+
+SeekableBuffer::SeekableBuffer(int backward_capacity, int forward_capacity)
+ : current_buffer_offset_(0),
+ backward_capacity_(backward_capacity),
+ backward_bytes_(0),
+ forward_capacity_(forward_capacity),
+ forward_bytes_(0),
+ current_time_(kNoTimestamp()) {
+ current_buffer_ = buffers_.begin();
+}
+
+SeekableBuffer::~SeekableBuffer() {
+}
+
+void SeekableBuffer::Clear() {
+ buffers_.clear();
+ current_buffer_ = buffers_.begin();
+ current_buffer_offset_ = 0;
+ backward_bytes_ = 0;
+ forward_bytes_ = 0;
+ current_time_ = kNoTimestamp();
+}
+
+int SeekableBuffer::Read(uint8* data, int size) {
+ DCHECK(data);
+ return InternalRead(data, size, true, 0);
+}
+
+int SeekableBuffer::Peek(uint8* data, int size, int forward_offset) {
+ DCHECK(data);
+ return InternalRead(data, size, false, forward_offset);
+}
+
+bool SeekableBuffer::GetCurrentChunk(const uint8** data, int* size) const {
+ BufferQueue::iterator current_buffer = current_buffer_;
+ int current_buffer_offset = current_buffer_offset_;
+ // Advance the position if we are at the end of the current buffer.
+ while (current_buffer != buffers_.end() &&
+ current_buffer_offset >= (*current_buffer)->data_size()) {
+ ++current_buffer;
+ current_buffer_offset = 0;
+ }
+ if (current_buffer == buffers_.end())
+ return false;
+ *data = (*current_buffer)->data() + current_buffer_offset;
+ *size = (*current_buffer)->data_size() - current_buffer_offset;
+ return true;
+}
+
+bool SeekableBuffer::Append(const scoped_refptr<DataBuffer>& buffer_in) {
+ if (buffers_.empty() && buffer_in->timestamp() != kNoTimestamp()) {
+ current_time_ = buffer_in->timestamp();
+ }
+
+ // Since the forward capacity is only used to check whether the buffer is
+ // full, we always append the data.
+ buffers_.push_back(buffer_in);
+
+ // After we have written the first buffer, update |current_buffer_| to point
+ // to it.
+ if (current_buffer_ == buffers_.end()) {
+ DCHECK_EQ(0, forward_bytes_);
+ current_buffer_ = buffers_.begin();
+ }
+
+ // Update the |forward_bytes_| counter since we have more bytes.
+ forward_bytes_ += buffer_in->data_size();
+
+ // Advise the caller to stop appending if the number of forward bytes
+ // exceeds the forward capacity. A false return value means the caller
+ // should stop appending more data to this buffer.
+ if (forward_bytes_ >= forward_capacity_)
+ return false;
+ return true;
+}
+
+bool SeekableBuffer::Append(const uint8* data, int size) {
+ if (size > 0) {
+ scoped_refptr<DataBuffer> data_buffer = DataBuffer::CopyFrom(data, size);
+ return Append(data_buffer);
+ } else {
+ // Return true if we have forward capacity.
+ return forward_bytes_ < forward_capacity_;
+ }
+}
+
+bool SeekableBuffer::Seek(int32 offset) {
+ if (offset > 0)
+ return SeekForward(offset);
+ else if (offset < 0)
+ return SeekBackward(-offset);
+ return true;
+}
+
+bool SeekableBuffer::SeekForward(int size) {
+ // Perform seeking forward only if we have enough bytes in the queue.
+ if (size > forward_bytes_)
+ return false;
+
+ // Do a read of |size| bytes.
+ int taken = InternalRead(NULL, size, true, 0);
+ DCHECK_EQ(taken, size);
+ return true;
+}
+
+bool SeekableBuffer::SeekBackward(int size) {
+ if (size > backward_bytes_)
+ return false;
+ // Record the number of bytes taken.
+ int taken = 0;
+ // Loop until we have taken enough bytes to rewind by the desired |size|.
+ while (taken < size) {
+ // |current_buffer_| can never be invalid when we are in this loop. It can
+ // only be invalid before any data is appended. The invalid case should be
+ // handled by checks before we enter this loop.
+ DCHECK(current_buffer_ != buffers_.end());
+
+ // We try to consume at most |size| bytes in the backward direction, but we
+ // cannot rewind past the start of the current buffer, so take the minimum
+ // of the bytes still needed and the current offset to determine how many
+ // bytes to take from the current buffer.
+ int consumed = std::min(size - taken, current_buffer_offset_);
+
+ // Decrease the offset into the current buffer since we are rewinding.
+ current_buffer_offset_ -= consumed;
+
+ // Increase the number of bytes taken in the backward direction. This
+ // determines when to stop the loop.
+ taken += consumed;
+
+ // Forward bytes increases and backward bytes decreases by the amount
+ // consumed in the current buffer.
+ forward_bytes_ += consumed;
+ backward_bytes_ -= consumed;
+ DCHECK_GE(backward_bytes_, 0);
+
+ // The current buffer pointed to by the iterator has been fully rewound.
+ // Move the iterator backward so it points to the previous buffer.
+ if (current_buffer_offset_ == 0) {
+ if (current_buffer_ == buffers_.begin())
+ break;
+ // Move the iterator backward.
+ --current_buffer_;
+ // Set the offset into the current buffer to the buffer size, preparing
+ // to rewind into it on the next iteration.
+ current_buffer_offset_ = (*current_buffer_)->data_size();
+ }
+ }
+
+ UpdateCurrentTime(current_buffer_, current_buffer_offset_);
+
+ DCHECK_EQ(taken, size);
+ return true;
+}
+
+void SeekableBuffer::EvictBackwardBuffers() {
+ // Evict buffers from the front of the queue until the backward bytes fit
+ // within the backward capacity or we reach the current buffer.
+ while (backward_bytes_ > backward_capacity_) {
+ BufferQueue::iterator i = buffers_.begin();
+ if (i == current_buffer_)
+ break;
+ scoped_refptr<DataBuffer> buffer = *i;
+ backward_bytes_ -= buffer->data_size();
+ DCHECK_GE(backward_bytes_, 0);
+
+ buffers_.erase(i);
+ }
+}
+
+int SeekableBuffer::InternalRead(uint8* data, int size,
+ bool advance_position,
+ int forward_offset) {
+ // Counts how many bytes are actually read from the buffer queue.
+ int taken = 0;
+
+ BufferQueue::iterator current_buffer = current_buffer_;
+ int current_buffer_offset = current_buffer_offset_;
+
+ int bytes_to_skip = forward_offset;
+ while (taken < size) {
+ // |current_buffer| is valid as soon as data has been appended to this
+ // buffer.
+ if (current_buffer == buffers_.end())
+ break;
+
+ scoped_refptr<DataBuffer> buffer = *current_buffer;
+
+ int remaining_bytes_in_buffer =
+ buffer->data_size() - current_buffer_offset;
+
+ if (bytes_to_skip == 0) {
+ // Find the right amount to copy from the current buffer referenced by
+ // |buffer|. We copy no more than |size| bytes in total, and each step
+ // copies no more than what remains in the current buffer.
+ int copied = std::min(size - taken, remaining_bytes_in_buffer);
+
+ // |data| is NULL if we are seeking forward, so there's no need to copy.
+ if (data)
+ memcpy(data + taken, buffer->data() + current_buffer_offset, copied);
+
+ // Increase the total number of bytes copied, which determines when to
+ // end this loop.
+ taken += copied;
+
+ // We have read |copied| bytes from the current buffer, so advance the
+ // offset.
+ current_buffer_offset += copied;
+ } else {
+ int skipped = std::min(remaining_bytes_in_buffer, bytes_to_skip);
+ current_buffer_offset += skipped;
+ bytes_to_skip -= skipped;
+ }
+
+ // The buffer has been consumed.
+ if (current_buffer_offset == buffer->data_size()) {
+ if (advance_position) {
+ // The next buffer may not have a timestamp, so update the current
+ // timestamp before switching to the next buffer.
+ UpdateCurrentTime(current_buffer, current_buffer_offset);
+ }
+
+ BufferQueue::iterator next = current_buffer;
+ ++next;
+ // If we are at the last buffer, don't advance.
+ if (next == buffers_.end())
+ break;
+
+ // Advances the iterator.
+ current_buffer = next;
+ current_buffer_offset = 0;
+ }
+ }
+
+ if (advance_position) {
+ // We now have fewer forward bytes and more backward bytes. Update both
+ // counters by |taken|.
+ forward_bytes_ -= taken;
+ backward_bytes_ += taken;
+ DCHECK_GE(forward_bytes_, 0);
+ DCHECK(current_buffer_ != buffers_.end() || forward_bytes_ == 0);
+
+ current_buffer_ = current_buffer;
+ current_buffer_offset_ = current_buffer_offset;
+
+ UpdateCurrentTime(current_buffer_, current_buffer_offset_);
+ EvictBackwardBuffers();
+ }
+
+ return taken;
+}
+
+void SeekableBuffer::UpdateCurrentTime(BufferQueue::iterator buffer,
+ int offset) {
+ // Buffers may carry no (or garbage) timestamps, so only update
+ // |current_time_| when the current buffer has a valid timestamp.
+ if (buffer != buffers_.end() &&
+ (*buffer)->timestamp() != kNoTimestamp()) {
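+ // Interpolate linearly within the buffer: e.g. a 1 second buffer of 512
+ // bytes with an |offset| of 256 maps to timestamp() + 500 ms.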
+ int64 time_offset = ((*buffer)->duration().InMicroseconds() * offset) /
+ (*buffer)->data_size();
+
+ current_time_ = (*buffer)->timestamp() +
+ base::TimeDelta::FromMicroseconds(time_offset);
+ }
+}
+
+} // namespace media
diff --git a/chromium/media/base/seekable_buffer.h b/chromium/media/base/seekable_buffer.h
new file mode 100644
index 00000000000..41d26fea185
--- /dev/null
+++ b/chromium/media/base/seekable_buffer.h
@@ -0,0 +1,184 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// SeekableBuffer supports backward and forward seeking in a buffer used for
+// reading a media data source.
+//
+// In order to support backward and forward seeking, this class buffers data
+// in both the backward and forward directions; the current read position can
+// be reset to anywhere in the buffered data.
+//
+// The amount of data buffered is regulated by two values given at
+// construction, |backward_capacity| and |forward_capacity|.
+//
+// When reading or seeking forward, the current read position advances and
+// more data accumulates in the backward direction. If the backward bytes
+// exceed |backward_capacity|, the excess bytes are evicted, so
+// backward_bytes() is always less than or equal to |backward_capacity|. The
+// eviction is triggered by Read() and Seek() in the forward direction and is
+// performed internally when that criterion is met.
+//
+// When appending data to the buffer, there is an advisory limit on how many
+// bytes can be kept in the forward direction, regulated by
+// |forward_capacity|. An Append() call that causes the forward bytes to
+// exceed |forward_capacity| returns a value advising a halt; further appends
+// are allowed but not advised. Since this class is used as a backend buffer
+// for caching media files downloaded from the network, we cannot afford to
+// lose data; we can only advise a halt of further writes to this buffer.
+//
+// This class is not inherently thread-safe. Concurrent access must be
+// externally serialized.
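+//
+// Example usage (a minimal sketch; |chunk| and |chunk_size| are illustrative):
+//   SeekableBuffer buffer(4096, 16384);  // 4 KB backward, 16 KB forward.
+//   bool keep_appending = buffer.Append(chunk, chunk_size);  // Advisory.
+//   uint8 out[1024];
+//   int read = buffer.Read(out, sizeof(out));  // Advances the read position.
+//   buffer.Seek(-read);  // Rewind so the same bytes can be read again.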
+
+#ifndef MEDIA_BASE_SEEKABLE_BUFFER_H_
+#define MEDIA_BASE_SEEKABLE_BUFFER_H_
+
+#include <list>
+
+#include "base/basictypes.h"
+#include "base/memory/ref_counted.h"
+#include "media/base/buffers.h"
+
+namespace media {
+
+class DataBuffer;
+
+class MEDIA_EXPORT SeekableBuffer {
+ public:
+ // Constructs an instance with |forward_capacity| and |backward_capacity|.
+ // The values are in bytes.
+ SeekableBuffer(int backward_capacity, int forward_capacity);
+
+ ~SeekableBuffer();
+
+ // Clears the buffer queue.
+ void Clear();
+
+ // Reads a maximum of |size| bytes into |data| from the current read
+ // position. Returns the number of bytes read.
+ // The current read position will advance by the amount of bytes read. If
+ // reading caused backward_bytes() to exceed backward_capacity(), an eviction
+ // of the backward buffer will be done internally.
+ int Read(uint8* data, int size);
+
+ // Copies up to |size| bytes from current position to |data|. Returns
+ // number of bytes copied. Doesn't advance current position. Optionally
+ // starts at a |forward_offset| from current position.
+ int Peek(uint8* data, int size) { return Peek(data, size, 0); }
+ int Peek(uint8* data, int size, int forward_offset);
+
+ // Returns a pointer to the current chunk of data that is being consumed.
+ // If there is no data left in the buffer, false is returned; otherwise
+ // true is returned and |data| and |size| are updated. The returned
+ // |data| value becomes invalid when Read(), Append() or Seek()
+ // are called.
+ bool GetCurrentChunk(const uint8** data, int* size) const;
+
+ // Appends |buffer_in| to this buffer. Returns false if forward_bytes() is
+ // greater than or equal to forward_capacity(), true otherwise. The data
+ // is added to the buffer in either case.
+ bool Append(const scoped_refptr<DataBuffer>& buffer_in);
+
+ // Appends |size| bytes of |data| to the buffer. The result is the same as
+ // for the scoped_refptr<DataBuffer> overload of Append() above.
+ bool Append(const uint8* data, int size);
+
+ // Moves the read position by |offset| bytes. If |offset| is positive, the
+ // current read position is moved forward. If negative, the current read
+ // position is moved backward. A zero |offset| value will keep the current
+ // read position stationary.
+ // If |offset| exceeds bytes buffered in either direction, reported by
+ // forward_bytes() when seeking forward and backward_bytes() when seeking
+ // backward, the seek operation will fail and return value will be false.
+ // If the seek operation fails, the current read position will not be updated.
+ // If a forward seeking caused backward_bytes() to exceed backward_capacity(),
+ // this method call will cause an eviction of the backward buffer.
+ bool Seek(int32 offset);
+
+ // Returns the number of bytes buffered beyond the current read position.
+ int forward_bytes() const { return forward_bytes_; }
+
+ // Returns the number of bytes buffered that precedes the current read
+ // position.
+ int backward_bytes() const { return backward_bytes_; }
+
+ // Sets the forward_capacity to |new_forward_capacity| bytes.
+ void set_forward_capacity(int new_forward_capacity) {
+ forward_capacity_ = new_forward_capacity;
+ }
+
+ // Sets the backward_capacity to |new_backward_capacity| bytes.
+ void set_backward_capacity(int new_backward_capacity) {
+ backward_capacity_ = new_backward_capacity;
+ }
+
+ // Returns the maximum number of bytes that should be kept in the forward
+ // direction.
+ int forward_capacity() const { return forward_capacity_; }
+
+ // Returns the maximum number of bytes that should be kept in the backward
+ // direction.
+ int backward_capacity() const { return backward_capacity_; }
+
+ // Returns the current timestamp, taking the current offset into account.
+ // The value is calculated from the timestamp of the current buffer. If the
+ // timestamp of the current buffer is set to 0, or the data was added with
+ // Append(const uint8*, int), then this returns the value that corresponds
+ // to the last position in a buffer that had its timestamp set.
+ // kNoTimestamp() is returned if no buffer read so far had a timestamp set.
+ base::TimeDelta current_time() const { return current_time_; }
+
+ private:
+ // Definition of the buffer queue.
+ typedef std::list<scoped_refptr<DataBuffer> > BufferQueue;
+
+ // A helper method to evict buffers in the backward direction until backward
+ // bytes is within the backward capacity.
+ void EvictBackwardBuffers();
+
+ // An internal method shared by Read() and SeekForward() that actually does
+ // reading. It reads a maximum of |size| bytes into |data|. Returns the number
+ // of bytes read. The current read position will be moved forward by the
+ // number of bytes read. If |data| is NULL, only the current read position
+ // will advance but no data will be copied.
+ int InternalRead(
+ uint8* data, int size, bool advance_position, int forward_offset);
+
+ // A helper method that moves the current read position forward by |size|
+ // bytes.
+ // If the return value is true, the operation completed successfully.
+ // If the return value is false, |size| is greater than forward_bytes() and
+ // the seek operation failed. The current read position is not updated.
+ bool SeekForward(int size);
+
+ // A helper method that moves the current read position backward by |size|
+ // bytes.
+ // If the return value is true, the operation completed successfully.
+ // If the return value is false, |size| is greater than backward_bytes() and
+ // the seek operation failed. The current read position is not updated.
+ bool SeekBackward(int size);
+
+ // Updates |current_time_| with the time that corresponds to the
+ // specified position in the buffer.
+ void UpdateCurrentTime(BufferQueue::iterator buffer, int offset);
+
+ BufferQueue::iterator current_buffer_;
+ BufferQueue buffers_;
+ int current_buffer_offset_;
+
+ int backward_capacity_;
+ int backward_bytes_;
+
+ int forward_capacity_;
+ int forward_bytes_;
+
+ // Keeps track of the most recent time we've seen, in case |buffers_| is
+ // empty when our owner asks what time it is.
+ base::TimeDelta current_time_;
+
+ DISALLOW_COPY_AND_ASSIGN(SeekableBuffer);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_SEEKABLE_BUFFER_H_
diff --git a/chromium/media/base/seekable_buffer_unittest.cc b/chromium/media/base/seekable_buffer_unittest.cc
new file mode 100644
index 00000000000..c5e3fb635ed
--- /dev/null
+++ b/chromium/media/base/seekable_buffer_unittest.cc
@@ -0,0 +1,352 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/time/time.h"
+#include "media/base/data_buffer.h"
+#include "media/base/seekable_buffer.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+class SeekableBufferTest : public testing::Test {
+ public:
+ SeekableBufferTest() : buffer_(kBufferSize, kBufferSize) {
+ }
+
+ protected:
+ static const int kDataSize = 409600;
+ static const int kBufferSize = 4096;
+ static const int kWriteSize = 512;
+
+ virtual void SetUp() {
+ // Seed the random number generator.
+ int seed = static_cast<int32>(base::Time::Now().ToInternalValue());
+ srand(seed);
+ VLOG(1) << "Random seed: " << seed;
+
+ // Create the test data.
+ for (int i = 0; i < kDataSize; i++)
+ data_[i] = static_cast<char>(rand());
+ }
+
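+ // Returns a random integer in the range [1, |maximum|].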
+ int GetRandomInt(int maximum) {
+ return rand() % maximum + 1;
+ }
+
+ SeekableBuffer buffer_;
+ uint8 data_[kDataSize];
+ uint8 write_buffer_[kDataSize];
+};
+
+TEST_F(SeekableBufferTest, RandomReadWrite) {
+ int write_position = 0;
+ int read_position = 0;
+ while (read_position < kDataSize) {
+ // Write a random amount of data.
+ int write_size = GetRandomInt(kBufferSize);
+ write_size = std::min(write_size, kDataSize - write_position);
+ bool should_append = buffer_.Append(data_ + write_position, write_size);
+ write_position += write_size;
+ EXPECT_GE(write_position, read_position);
+ EXPECT_EQ(write_position - read_position, buffer_.forward_bytes());
+ EXPECT_EQ(should_append, buffer_.forward_bytes() < kBufferSize)
+ << "Incorrect buffer full reported";
+
+ // Peek a random amount of data.
+ int copy_size = GetRandomInt(kBufferSize);
+ int bytes_copied = buffer_.Peek(write_buffer_, copy_size);
+ EXPECT_GE(copy_size, bytes_copied);
+ EXPECT_EQ(0, memcmp(write_buffer_, data_ + read_position, bytes_copied));
+
+ // Read a random amount of data.
+ int read_size = GetRandomInt(kBufferSize);
+ int bytes_read = buffer_.Read(write_buffer_, read_size);
+ EXPECT_GE(read_size, bytes_read);
+ EXPECT_EQ(0, memcmp(write_buffer_, data_ + read_position, bytes_read));
+ read_position += bytes_read;
+ EXPECT_GE(write_position, read_position);
+ EXPECT_EQ(write_position - read_position, buffer_.forward_bytes());
+ }
+}
+
+TEST_F(SeekableBufferTest, ReadWriteSeek) {
+ const int kReadSize = kWriteSize / 4;
+
+ for (int i = 0; i < 10; ++i) {
+ // Write until buffer is full.
+ for (int j = 0; j < kBufferSize; j += kWriteSize) {
+ bool should_append = buffer_.Append(data_ + j, kWriteSize);
+ EXPECT_EQ(j < kBufferSize - kWriteSize, should_append)
+ << "Incorrect buffer full reported";
+ EXPECT_EQ(j + kWriteSize, buffer_.forward_bytes());
+ }
+
+ // Simulate a read and seek pattern. Each loop reads 4 times, each time
+ // reading a quarter of |kWriteSize|.
+ int read_position = 0;
+ int forward_bytes = kBufferSize;
+ for (int j = 0; j < kBufferSize; j += kWriteSize) {
+ // Read.
+ EXPECT_EQ(kReadSize, buffer_.Read(write_buffer_, kReadSize));
+ forward_bytes -= kReadSize;
+ EXPECT_EQ(forward_bytes, buffer_.forward_bytes());
+ EXPECT_EQ(0, memcmp(write_buffer_, data_ + read_position, kReadSize));
+ read_position += kReadSize;
+
+ // Seek forward.
+ EXPECT_TRUE(buffer_.Seek(2 * kReadSize));
+ forward_bytes -= 2 * kReadSize;
+ read_position += 2 * kReadSize;
+ EXPECT_EQ(forward_bytes, buffer_.forward_bytes());
+
+ // Copy.
+ EXPECT_EQ(kReadSize, buffer_.Peek(write_buffer_, kReadSize));
+ EXPECT_EQ(forward_bytes, buffer_.forward_bytes());
+ EXPECT_EQ(0, memcmp(write_buffer_, data_ + read_position, kReadSize));
+
+ // Read.
+ EXPECT_EQ(kReadSize, buffer_.Read(write_buffer_, kReadSize));
+ forward_bytes -= kReadSize;
+ EXPECT_EQ(forward_bytes, buffer_.forward_bytes());
+ EXPECT_EQ(0, memcmp(write_buffer_, data_ + read_position, kReadSize));
+ read_position += kReadSize;
+
+ // Seek backward.
+ EXPECT_TRUE(buffer_.Seek(-3 * static_cast<int32>(kReadSize)));
+ forward_bytes += 3 * kReadSize;
+ read_position -= 3 * kReadSize;
+ EXPECT_EQ(forward_bytes, buffer_.forward_bytes());
+
+ // Copy.
+ EXPECT_EQ(kReadSize, buffer_.Peek(write_buffer_, kReadSize));
+ EXPECT_EQ(forward_bytes, buffer_.forward_bytes());
+ EXPECT_EQ(0, memcmp(write_buffer_, data_ + read_position, kReadSize));
+
+ // Read.
+ EXPECT_EQ(kReadSize, buffer_.Read(write_buffer_, kReadSize));
+ forward_bytes -= kReadSize;
+ EXPECT_EQ(forward_bytes, buffer_.forward_bytes());
+ EXPECT_EQ(0, memcmp(write_buffer_, data_ + read_position, kReadSize));
+ read_position += kReadSize;
+
+ // Copy.
+ EXPECT_EQ(kReadSize, buffer_.Peek(write_buffer_, kReadSize));
+ EXPECT_EQ(forward_bytes, buffer_.forward_bytes());
+ EXPECT_EQ(0, memcmp(write_buffer_, data_ + read_position, kReadSize));
+
+ // Read.
+ EXPECT_EQ(kReadSize, buffer_.Read(write_buffer_, kReadSize));
+ forward_bytes -= kReadSize;
+ EXPECT_EQ(forward_bytes, buffer_.forward_bytes());
+ EXPECT_EQ(0, memcmp(write_buffer_, data_ + read_position, kReadSize));
+ read_position += kReadSize;
+
+ // Seek forward.
+ EXPECT_TRUE(buffer_.Seek(kReadSize));
+ forward_bytes -= kReadSize;
+ read_position += kReadSize;
+ EXPECT_EQ(forward_bytes, buffer_.forward_bytes());
+ }
+ }
+}
+
+TEST_F(SeekableBufferTest, BufferFull) {
+ const int kMaxWriteSize = 2 * kBufferSize;
+
+ // Write and expect the buffer not to be full.
+ for (int i = 0; i < kBufferSize - kWriteSize; i += kWriteSize) {
+ EXPECT_TRUE(buffer_.Append(data_ + i, kWriteSize));
+ EXPECT_EQ(i + kWriteSize, buffer_.forward_bytes());
+ }
+
+ // Write until we have kMaxWriteSize bytes in the buffer. The buffer
+ // reports full for each of these writes.
+ for (int i = buffer_.forward_bytes(); i < kMaxWriteSize; i += kWriteSize) {
+ EXPECT_FALSE(buffer_.Append(data_ + i, kWriteSize));
+ EXPECT_EQ(i + kWriteSize, buffer_.forward_bytes());
+ }
+
+ // Read until the buffer is empty.
+ int read_position = 0;
+ while (buffer_.forward_bytes()) {
+ // Read a random amount of data.
+ int read_size = GetRandomInt(kBufferSize);
+ int forward_bytes = buffer_.forward_bytes();
+ int bytes_read = buffer_.Read(write_buffer_, read_size);
+ EXPECT_EQ(0, memcmp(write_buffer_, data_ + read_position, bytes_read));
+ if (read_size > forward_bytes)
+ EXPECT_EQ(forward_bytes, bytes_read);
+ else
+ EXPECT_EQ(read_size, bytes_read);
+ read_position += bytes_read;
+ EXPECT_GE(kMaxWriteSize, read_position);
+ EXPECT_EQ(kMaxWriteSize - read_position, buffer_.forward_bytes());
+ }
+
+ // Expect that we have no bytes left.
+ EXPECT_EQ(0, buffer_.forward_bytes());
+ EXPECT_EQ(0, buffer_.Read(write_buffer_, 1));
+}
+
+TEST_F(SeekableBufferTest, SeekBackward) {
+ EXPECT_EQ(0, buffer_.forward_bytes());
+ EXPECT_EQ(0, buffer_.backward_bytes());
+ EXPECT_FALSE(buffer_.Seek(1));
+ EXPECT_FALSE(buffer_.Seek(-1));
+
+ const int kReadSize = 256;
+
+ // Write into buffer until it's full.
+ for (int i = 0; i < kBufferSize; i += kWriteSize) {
+ // Write a random amount of data.
+ buffer_.Append(data_ + i, kWriteSize);
+ }
+
+ // Read until the buffer is empty.
+ for (int i = 0; i < kBufferSize; i += kReadSize) {
+ EXPECT_EQ(kReadSize, buffer_.Read(write_buffer_, kReadSize));
+ EXPECT_EQ(0, memcmp(write_buffer_, data_ + i, kReadSize));
+ }
+
+ // Seek backward.
+ EXPECT_TRUE(buffer_.Seek(-static_cast<int32>(kBufferSize)));
+ EXPECT_FALSE(buffer_.Seek(-1));
+
+ // Read again.
+ for (int i = 0; i < kBufferSize; i += kReadSize) {
+ EXPECT_EQ(kReadSize, buffer_.Read(write_buffer_, kReadSize));
+ EXPECT_EQ(0, memcmp(write_buffer_, data_ + i, kReadSize));
+ }
+}
+
+TEST_F(SeekableBufferTest, GetCurrentChunk) {
+ const int kSeekSize = kWriteSize / 3;
+
+ scoped_refptr<DataBuffer> buffer = DataBuffer::CopyFrom(data_, kWriteSize);
+
+ const uint8* data;
+ int size;
+ EXPECT_FALSE(buffer_.GetCurrentChunk(&data, &size));
+
+ buffer_.Append(buffer.get());
+ EXPECT_TRUE(buffer_.GetCurrentChunk(&data, &size));
+ EXPECT_EQ(data, buffer->data());
+ EXPECT_EQ(size, buffer->data_size());
+
+ buffer_.Seek(kSeekSize);
+ EXPECT_TRUE(buffer_.GetCurrentChunk(&data, &size));
+ EXPECT_EQ(data, buffer->data() + kSeekSize);
+ EXPECT_EQ(size, buffer->data_size() - kSeekSize);
+}
+
+TEST_F(SeekableBufferTest, SeekForward) {
+ int write_position = 0;
+ int read_position = 0;
+ while (read_position < kDataSize) {
+ for (int i = 0; i < 10 && write_position < kDataSize; ++i) {
+ // Write a random amount of data.
+ int write_size = GetRandomInt(kBufferSize);
+ write_size = std::min(write_size, kDataSize - write_position);
+
+ bool should_append = buffer_.Append(data_ + write_position, write_size);
+ write_position += write_size;
+ EXPECT_GE(write_position, read_position);
+ EXPECT_EQ(write_position - read_position, buffer_.forward_bytes());
+ EXPECT_EQ(should_append, buffer_.forward_bytes() < kBufferSize)
+ << "Incorrect buffer full status reported";
+ }
+
+ // Seek forward by a random amount.
+ int seek_size = GetRandomInt(kBufferSize);
+ if (buffer_.Seek(seek_size))
+ read_position += seek_size;
+ EXPECT_GE(write_position, read_position);
+ EXPECT_EQ(write_position - read_position, buffer_.forward_bytes());
+
+ // Read a random amount of data.
+ int read_size = GetRandomInt(kBufferSize);
+ int bytes_read = buffer_.Read(write_buffer_, read_size);
+ EXPECT_GE(read_size, bytes_read);
+ EXPECT_EQ(0, memcmp(write_buffer_, data_ + read_position, bytes_read));
+ read_position += bytes_read;
+ EXPECT_GE(write_position, read_position);
+ EXPECT_EQ(write_position - read_position, buffer_.forward_bytes());
+ }
+}
+
+TEST_F(SeekableBufferTest, AllMethods) {
+ EXPECT_EQ(0, buffer_.Read(write_buffer_, 0));
+ EXPECT_EQ(0, buffer_.Read(write_buffer_, 1));
+ EXPECT_TRUE(buffer_.Seek(0));
+ EXPECT_FALSE(buffer_.Seek(-1));
+ EXPECT_FALSE(buffer_.Seek(1));
+ EXPECT_EQ(0, buffer_.forward_bytes());
+ EXPECT_EQ(0, buffer_.backward_bytes());
+}
+
+TEST_F(SeekableBufferTest, GetTime) {
+ const int64 kNoTS = kNoTimestamp().ToInternalValue();
+ const struct {
+ int64 first_time_useconds;
+ int64 duration_useconds;
+ int consume_bytes;
+ int64 expected_time;
+ } tests[] = {
+ { kNoTS, 1000000, 0, kNoTS },
+ { kNoTS, 4000000, 0, kNoTS },
+ { kNoTS, 8000000, 0, kNoTS },
+ { kNoTS, 1000000, kWriteSize / 2, kNoTS },
+ { kNoTS, 4000000, kWriteSize / 2, kNoTS },
+ { kNoTS, 8000000, kWriteSize / 2, kNoTS },
+ { kNoTS, 1000000, kWriteSize, kNoTS },
+ { kNoTS, 4000000, kWriteSize, kNoTS },
+ { kNoTS, 8000000, kWriteSize, kNoTS },
+ { 0, 1000000, 0, 0 },
+ { 0, 4000000, 0, 0 },
+ { 0, 8000000, 0, 0 },
+ { 0, 1000000, kWriteSize / 2, 500000 },
+ { 0, 4000000, kWriteSize / 2, 2000000 },
+ { 0, 8000000, kWriteSize / 2, 4000000 },
+ { 0, 1000000, kWriteSize, 1000000 },
+ { 0, 4000000, kWriteSize, 4000000 },
+ { 0, 8000000, kWriteSize, 8000000 },
+ { 5, 1000000, 0, 5 },
+ { 5, 4000000, 0, 5 },
+ { 5, 8000000, 0, 5 },
+ { 5, 1000000, kWriteSize / 2, 500005 },
+ { 5, 4000000, kWriteSize / 2, 2000005 },
+ { 5, 8000000, kWriteSize / 2, 4000005 },
+ { 5, 1000000, kWriteSize, 1000005 },
+ { 5, 4000000, kWriteSize, 4000005 },
+ { 5, 8000000, kWriteSize, 8000005 },
+ };
+
+ // current_time() must initially return kNoTimestamp().
+ EXPECT_EQ(kNoTimestamp().ToInternalValue(),
+ buffer_.current_time().ToInternalValue());
+
+ scoped_refptr<DataBuffer> buffer = DataBuffer::CopyFrom(data_, kWriteSize);
+
+ for (size_t i = 0; i < ARRAYSIZE_UNSAFE(tests); ++i) {
+ buffer->set_timestamp(base::TimeDelta::FromMicroseconds(
+ tests[i].first_time_useconds));
+ buffer->set_duration(base::TimeDelta::FromMicroseconds(
+ tests[i].duration_useconds));
+ buffer_.Append(buffer.get());
+ EXPECT_TRUE(buffer_.Seek(tests[i].consume_bytes));
+
+ int64 actual = buffer_.current_time().ToInternalValue();
+
+ EXPECT_EQ(tests[i].expected_time, actual) << "With test = { start:"
+ << tests[i].first_time_useconds << ", duration:"
+ << tests[i].duration_useconds << ", consumed:"
+ << tests[i].consume_bytes << " }\n";
+
+ buffer_.Clear();
+ }
+}
+
+} // namespace media
diff --git a/chromium/media/base/serial_runner.cc b/chromium/media/base/serial_runner.cc
new file mode 100644
index 00000000000..fa391331467
--- /dev/null
+++ b/chromium/media/base/serial_runner.cc
@@ -0,0 +1,90 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/serial_runner.h"
+
+#include "base/bind.h"
+#include "base/callback_helpers.h"
+#include "base/message_loop/message_loop.h"
+#include "base/message_loop/message_loop_proxy.h"
+
+namespace media {
+
+// Converts a bound function accepting a Closure into a bound function
+// accepting a PipelineStatusCB. Since closures have no way of reporting a
+// status, |status_cb| is executed with PIPELINE_OK.
+static void RunBoundClosure(
+ const SerialRunner::BoundClosure& bound_closure,
+ const PipelineStatusCB& status_cb) {
+ bound_closure.Run(base::Bind(status_cb, PIPELINE_OK));
+}
+
+// Runs |status_cb| with |last_status| on |message_loop|.
+static void RunOnMessageLoop(
+ const scoped_refptr<base::MessageLoopProxy>& message_loop,
+ const PipelineStatusCB& status_cb,
+ PipelineStatus last_status) {
+ // Force post to permit cancellation of a series in the scenario where all
+ // bound functions run on the same thread.
+ message_loop->PostTask(FROM_HERE, base::Bind(status_cb, last_status));
+}
+
+SerialRunner::Queue::Queue() {}
+SerialRunner::Queue::~Queue() {}
+
+void SerialRunner::Queue::Push(
+ const BoundClosure& bound_closure) {
+ bound_fns_.push(base::Bind(&RunBoundClosure, bound_closure));
+}
+
+void SerialRunner::Queue::Push(
+ const BoundPipelineStatusCB& bound_status_cb) {
+ bound_fns_.push(bound_status_cb);
+}
+
+SerialRunner::BoundPipelineStatusCB SerialRunner::Queue::Pop() {
+ BoundPipelineStatusCB bound_fn = bound_fns_.front();
+ bound_fns_.pop();
+ return bound_fn;
+}
+
+bool SerialRunner::Queue::empty() {
+ return bound_fns_.empty();
+}
+
+SerialRunner::SerialRunner(
+ const Queue& bound_fns, const PipelineStatusCB& done_cb)
+ : weak_this_(this),
+ message_loop_(base::MessageLoopProxy::current()),
+ bound_fns_(bound_fns),
+ done_cb_(done_cb) {
+ message_loop_->PostTask(FROM_HERE, base::Bind(
+ &SerialRunner::RunNextInSeries, weak_this_.GetWeakPtr(),
+ PIPELINE_OK));
+}
+
+SerialRunner::~SerialRunner() {}
+
+scoped_ptr<SerialRunner> SerialRunner::Run(
+ const Queue& bound_fns, const PipelineStatusCB& done_cb) {
+ scoped_ptr<SerialRunner> callback_series(
+ new SerialRunner(bound_fns, done_cb));
+ return callback_series.Pass();
+}
+
+void SerialRunner::RunNextInSeries(PipelineStatus last_status) {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(!done_cb_.is_null());
+
+ if (bound_fns_.empty() || last_status != PIPELINE_OK) {
+ base::ResetAndReturn(&done_cb_).Run(last_status);
+ return;
+ }
+
+ BoundPipelineStatusCB bound_fn = bound_fns_.Pop();
+ bound_fn.Run(base::Bind(&RunOnMessageLoop, message_loop_, base::Bind(
+ &SerialRunner::RunNextInSeries, weak_this_.GetWeakPtr())));
+}
+
+} // namespace media
diff --git a/chromium/media/base/serial_runner.h b/chromium/media/base/serial_runner.h
new file mode 100644
index 00000000000..a59c7753c9c
--- /dev/null
+++ b/chromium/media/base/serial_runner.h
@@ -0,0 +1,76 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_SERIAL_RUNNER_H_
+#define MEDIA_BASE_SERIAL_RUNNER_H_
+
+#include <queue>
+
+#include "base/callback.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
+#include "media/base/pipeline_status.h"
+
+namespace base {
+class MessageLoopProxy;
+}
+
+namespace media {
+
+// Runs a series of bound functions accepting Closures or PipelineStatusCB.
+// SerialRunner doesn't use regular Closures/PipelineStatusCBs because it
+// late-binds the completion callback as the series progresses.
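+//
+// Example (a sketch; |demuxer_| and the bound methods are illustrative):
+//   SerialRunner::Queue bound_fns;
+//   bound_fns.Push(base::Bind(&Demuxer::Stop, base::Unretained(demuxer_)));
+//   bound_fns.Push(base::Bind(&AudioRenderer::Flush,
+//                             base::Unretained(audio_renderer_)));
+//   pending_callbacks_ = SerialRunner::Run(bound_fns, done_cb);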
+class SerialRunner {
+ public:
+ typedef base::Callback<void(const base::Closure&)> BoundClosure;
+ typedef base::Callback<void(const PipelineStatusCB&)> BoundPipelineStatusCB;
+
+ // Serial queue of bound functions to run.
+ class Queue {
+ public:
+ Queue();
+ ~Queue();
+
+ void Push(const BoundClosure& bound_fn);
+ void Push(const BoundPipelineStatusCB& bound_fn);
+
+ private:
+ friend class SerialRunner;
+
+ BoundPipelineStatusCB Pop();
+ bool empty();
+
+ std::queue<BoundPipelineStatusCB> bound_fns_;
+ };
+
+ // Executes the bound functions in series, executing |done_cb| when finished.
+ //
+ // All bound functions are executed on the thread that Run() is called on,
+ // including |done_cb|.
+ //
+ // Deleting the object will prevent execution of any unstarted bound
+ // functions, including |done_cb|.
+ static scoped_ptr<SerialRunner> Run(
+ const Queue& bound_fns, const PipelineStatusCB& done_cb);
+
+ private:
+ friend struct base::DefaultDeleter<SerialRunner>;
+
+ SerialRunner(const Queue& bound_fns, const PipelineStatusCB& done_cb);
+ ~SerialRunner();
+
+ void RunNextInSeries(PipelineStatus last_status);
+
+ base::WeakPtrFactory<SerialRunner> weak_this_;
+ scoped_refptr<base::MessageLoopProxy> message_loop_;
+ Queue bound_fns_;
+ PipelineStatusCB done_cb_;
+
+ DISALLOW_COPY_AND_ASSIGN(SerialRunner);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_SERIAL_RUNNER_H_
diff --git a/chromium/media/base/simd/convert_rgb_to_yuv.h b/chromium/media/base/simd/convert_rgb_to_yuv.h
new file mode 100644
index 00000000000..d3bb4ca7067
--- /dev/null
+++ b/chromium/media/base/simd/convert_rgb_to_yuv.h
@@ -0,0 +1,78 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_SIMD_CONVERT_RGB_TO_YUV_H_
+#define MEDIA_BASE_SIMD_CONVERT_RGB_TO_YUV_H_
+
+#include "base/basictypes.h"
+#include "media/base/yuv_convert.h"
+
+namespace media {
+
+// These methods are exported for testing purposes only. Library users should
+// only call the methods listed in yuv_convert.h.
+
+MEDIA_EXPORT void ConvertRGB32ToYUV_SSSE3(const uint8* rgbframe,
+ uint8* yplane,
+ uint8* uplane,
+ uint8* vplane,
+ int width,
+ int height,
+ int rgbstride,
+ int ystride,
+ int uvstride);
+
+MEDIA_EXPORT void ConvertRGB24ToYUV_SSSE3(const uint8* rgbframe,
+ uint8* yplane,
+ uint8* uplane,
+ uint8* vplane,
+ int width,
+ int height,
+ int rgbstride,
+ int ystride,
+ int uvstride);
+
+MEDIA_EXPORT void ConvertRGB32ToYUV_SSE2(const uint8* rgbframe,
+ uint8* yplane,
+ uint8* uplane,
+ uint8* vplane,
+ int width,
+ int height,
+ int rgbstride,
+ int ystride,
+ int uvstride);
+
+MEDIA_EXPORT void ConvertRGB32ToYUV_SSE2_Reference(const uint8* rgbframe,
+ uint8* yplane,
+ uint8* uplane,
+ uint8* vplane,
+ int width,
+ int height,
+ int rgbstride,
+ int ystride,
+ int uvstride);
+
+MEDIA_EXPORT void ConvertRGB32ToYUV_C(const uint8* rgbframe,
+ uint8* yplane,
+ uint8* uplane,
+ uint8* vplane,
+ int width,
+ int height,
+ int rgbstride,
+ int ystride,
+ int uvstride);
+
+MEDIA_EXPORT void ConvertRGB24ToYUV_C(const uint8* rgbframe,
+ uint8* yplane,
+ uint8* uplane,
+ uint8* vplane,
+ int width,
+ int height,
+ int rgbstride,
+ int ystride,
+ int uvstride);
+
+} // namespace media
+
+#endif // MEDIA_BASE_SIMD_CONVERT_RGB_TO_YUV_H_
diff --git a/chromium/media/base/simd/convert_rgb_to_yuv_c.cc b/chromium/media/base/simd/convert_rgb_to_yuv_c.cc
new file mode 100644
index 00000000000..4917d37bf36
--- /dev/null
+++ b/chromium/media/base/simd/convert_rgb_to_yuv_c.cc
@@ -0,0 +1,91 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/simd/convert_rgb_to_yuv.h"
+
+namespace media {
+
+static int clip_byte(int x) {
+ if (x > 255)
+ return 255;
+ else if (x < 0)
+ return 0;
+ else
+ return x;
+}
+
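+// The fixed-point constants used below are the 8-bit ITU-R BT.601
+// studio-swing coefficients: Y = ((66*R + 129*G + 25*B + 128) >> 8) + 16,
+// with analogous formulas plus a +128 offset for the chroma channels.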
+void ConvertRGB32ToYUV_C(const uint8* rgbframe,
+ uint8* yplane,
+ uint8* uplane,
+ uint8* vplane,
+ int width,
+ int height,
+ int rgbstride,
+ int ystride,
+ int uvstride) {
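+ // Android's 32-bit frames are laid out as RGBA in memory while the other
+ // platforms use BGRA, so pick the byte indices of R, G and B accordingly.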
+#if defined(OS_ANDROID)
+ const int r = 0;
+ const int g = 1;
+ const int b = 2;
+#else
+ const int r = 2;
+ const int g = 1;
+ const int b = 0;
+#endif
+
+ for (int i = 0; i < height; ++i) {
+ for (int j = 0; j < width; ++j) {
+ // Since the input pixel format is RGB32, there are 4 bytes per pixel.
+ const uint8* pixel = rgbframe + 4 * j;
+ yplane[j] = clip_byte(((pixel[r] * 66 + pixel[g] * 129 +
+ pixel[b] * 25 + 128) >> 8) + 16);
+ if (i % 2 == 0 && j % 2 == 0) {
+ uplane[j / 2] = clip_byte(((pixel[r] * -38 + pixel[g] * -74 +
+ pixel[b] * 112 + 128) >> 8) + 128);
+ vplane[j / 2] = clip_byte(((pixel[r] * 112 + pixel[g] * -94 +
+ pixel[b] * -18 + 128) >> 8) + 128);
+ }
+ }
+ rgbframe += rgbstride;
+ yplane += ystride;
+ if (i % 2 == 0) {
+ uplane += uvstride;
+ vplane += uvstride;
+ }
+ }
+}
+
+void ConvertRGB24ToYUV_C(const uint8* rgbframe,
+ uint8* yplane,
+ uint8* uplane,
+ uint8* vplane,
+ int width,
+ int height,
+ int rgbstride,
+ int ystride,
+ int uvstride) {
+ for (int i = 0; i < height; ++i) {
+ for (int j = 0; j < width; ++j) {
+ // Since the input pixel format is RGB24, there are 3 bytes per pixel.
+ const uint8* pixel = rgbframe + 3 * j;
+ yplane[j] = clip_byte(((pixel[2] * 66 + pixel[1] * 129 +
+ pixel[0] * 25 + 128) >> 8) + 16);
+ if (i % 2 == 0 && j % 2 == 0) {
+ uplane[j / 2] = clip_byte(((pixel[2] * -38 + pixel[1] * -74 +
+ pixel[0] * 112 + 128) >> 8) + 128);
+ vplane[j / 2] = clip_byte(((pixel[2] * 112 + pixel[1] * -94 +
+ pixel[0] * -18 + 128) >> 8) + 128);
+ }
+ }
+
+ rgbframe += rgbstride;
+ yplane += ystride;
+ if (i % 2 == 0) {
+ uplane += uvstride;
+ vplane += uvstride;
+ }
+ }
+}
+
+} // namespace media
diff --git a/chromium/media/base/simd/convert_rgb_to_yuv_sse2.cc b/chromium/media/base/simd/convert_rgb_to_yuv_sse2.cc
new file mode 100644
index 00000000000..f99a2fef840
--- /dev/null
+++ b/chromium/media/base/simd/convert_rgb_to_yuv_sse2.cc
@@ -0,0 +1,397 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "build/build_config.h"
+#include "media/base/simd/convert_rgb_to_yuv.h"
+#include "media/base/simd/yuv_to_rgb_table.h"
+
+#if defined(COMPILER_MSVC)
+#include <intrin.h>
+#else
+#include <mmintrin.h>
+#include <emmintrin.h>
+#endif
+
+namespace media {
+
+#define FIX_SHIFT 12
+#define FIX(x) ((x) * (1 << FIX_SHIFT))
+
+// A convenience macro that applies FIX() and casts the result to int16.
+#define INT16_FIX(x) static_cast<int16>(FIX(x))
+
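+// ITU-R BT.601 studio-swing coefficients in 4.12 fixed point, stored per
+// pixel as (B, G, R, 0) so two pixels can be multiply-added at once:
+//   Y = 0.257*R + 0.504*G + 0.098*B
+//   U = -0.148*R - 0.291*G + 0.439*B
+//   V = 0.439*R - 0.368*G - 0.071*B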
+SIMD_ALIGNED(const int16 ConvertRGBAToYUV_kTable[8 * 3]) = {
+ INT16_FIX(0.098), INT16_FIX(0.504), INT16_FIX(0.257), 0,
+ INT16_FIX(0.098), INT16_FIX(0.504), INT16_FIX(0.257), 0,
+ INT16_FIX(0.439), -INT16_FIX(0.291), -INT16_FIX(0.148), 0,
+ INT16_FIX(0.439), -INT16_FIX(0.291), -INT16_FIX(0.148), 0,
+ -INT16_FIX(0.071), -INT16_FIX(0.368), INT16_FIX(0.439), 0,
+ -INT16_FIX(0.071), -INT16_FIX(0.368), INT16_FIX(0.439), 0,
+};
+
+#undef INT16_FIX
+
+// This is the final offset for the conversion from signed YUV values to
+// unsigned values. It is arranged so that an offset of 16 is applied to the
+// Y components; the 128 added to the UV components is derived from it below
+// by a left shift of 3.
+SIMD_ALIGNED(const int32 kYOffset[4]) = {16, 16, 16, 16};
+
+static inline int Clamp(int value) {
+ if (value < 0)
+ return 0;
+ if (value > 255)
+ return 255;
+ return value;
+}
+
+static inline int RGBToY(int r, int g, int b) {
+ int y = ConvertRGBAToYUV_kTable[0] * b +
+ ConvertRGBAToYUV_kTable[1] * g +
+ ConvertRGBAToYUV_kTable[2] * r;
+ y >>= FIX_SHIFT;
+ return Clamp(y + 16);
+}
+
+static inline int RGBToU(int r, int g, int b, int shift) {
+ int u = ConvertRGBAToYUV_kTable[8] * b +
+ ConvertRGBAToYUV_kTable[9] * g +
+ ConvertRGBAToYUV_kTable[10] * r;
+ u >>= FIX_SHIFT + shift;
+ return Clamp(u + 128);
+}
+
+static inline int RGBToV(int r, int g, int b, int shift) {
+ int v = ConvertRGBAToYUV_kTable[16] * b +
+ ConvertRGBAToYUV_kTable[17] * g +
+ ConvertRGBAToYUV_kTable[18] * r;
+ v >>= FIX_SHIFT + shift;
+ return Clamp(v + 128);
+}
+
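+// Reads one 32-bit pixel (stored as B, G, R, A) from |rgb_buf|, accumulates
+// the per-channel sums used for chroma subsampling, and emits one Y value.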
+#define CONVERT_Y(rgb_buf, y_buf) \
+ b = *rgb_buf++; \
+ g = *rgb_buf++; \
+ r = *rgb_buf++; \
+ ++rgb_buf; \
+ sum_b += b; \
+ sum_g += g; \
+ sum_r += r; \
+ *y_buf++ = RGBToY(r, g, b);
+
+static inline void ConvertRGBToYUV_V2H2(const uint8* rgb_buf_1,
+ const uint8* rgb_buf_2,
+ uint8* y_buf_1,
+ uint8* y_buf_2,
+ uint8* u_buf,
+ uint8* v_buf) {
+ int sum_b = 0;
+ int sum_g = 0;
+ int sum_r = 0;
+ int r, g, b;
+
+ CONVERT_Y(rgb_buf_1, y_buf_1);
+ CONVERT_Y(rgb_buf_1, y_buf_1);
+ CONVERT_Y(rgb_buf_2, y_buf_2);
+ CONVERT_Y(rgb_buf_2, y_buf_2);
+ *u_buf++ = RGBToU(sum_r, sum_g, sum_b, 2);
+ *v_buf++ = RGBToV(sum_r, sum_g, sum_b, 2);
+}
+
+static inline void ConvertRGBToYUV_V2H1(const uint8* rgb_buf_1,
+ const uint8* rgb_buf_2,
+ uint8* y_buf_1,
+ uint8* y_buf_2,
+ uint8* u_buf,
+ uint8* v_buf) {
+ int sum_b = 0;
+ int sum_g = 0;
+ int sum_r = 0;
+ int r, g, b;
+
+ CONVERT_Y(rgb_buf_1, y_buf_1);
+ CONVERT_Y(rgb_buf_2, y_buf_2);
+ *u_buf++ = RGBToU(sum_r, sum_g, sum_b, 1);
+ *v_buf++ = RGBToV(sum_r, sum_g, sum_b, 1);
+}
+
+static inline void ConvertRGBToYUV_V1H2(const uint8* rgb_buf,
+ uint8* y_buf,
+ uint8* u_buf,
+ uint8* v_buf) {
+ int sum_b = 0;
+ int sum_g = 0;
+ int sum_r = 0;
+ int r, g, b;
+
+ CONVERT_Y(rgb_buf, y_buf);
+ CONVERT_Y(rgb_buf, y_buf);
+ *u_buf++ = RGBToU(sum_r, sum_g, sum_b, 1);
+ *v_buf++ = RGBToV(sum_r, sum_g, sum_b, 1);
+}
+
+static inline void ConvertRGBToYUV_V1H1(const uint8* rgb_buf,
+ uint8* y_buf,
+ uint8* u_buf,
+ uint8* v_buf) {
+ int sum_b = 0;
+ int sum_g = 0;
+ int sum_r = 0;
+ int r, g, b;
+
+ CONVERT_Y(rgb_buf, y_buf);
+ *u_buf++ = RGBToU(r, g, b, 0);
+ *v_buf++ = RGBToV(r, g, b, 0);
+}
+
+static void ConvertRGB32ToYUVRow_SSE2(const uint8* rgb_buf_1,
+ const uint8* rgb_buf_2,
+ uint8* y_buf_1,
+ uint8* y_buf_2,
+ uint8* u_buf,
+ uint8* v_buf,
+ int width) {
+ while (width >= 4) {
+ // Name for the Y pixels:
+ // Row 1: a b c d
+ // Row 2: e f g h
+ //
+ // First row 4 pixels.
+ __m128i rgb_row_1 = _mm_loadu_si128(
+ reinterpret_cast<const __m128i*>(rgb_buf_1));
+ __m128i zero_1 = _mm_xor_si128(rgb_row_1, rgb_row_1);
+
+ __m128i y_table = _mm_load_si128(
+ reinterpret_cast<const __m128i*>(ConvertRGBAToYUV_kTable));
+
+ __m128i rgb_a_b = _mm_unpackhi_epi8(rgb_row_1, zero_1);
+ rgb_a_b = _mm_madd_epi16(rgb_a_b, y_table);
+
+ __m128i rgb_c_d = _mm_unpacklo_epi8(rgb_row_1, zero_1);
+ rgb_c_d = _mm_madd_epi16(rgb_c_d, y_table);
+
+ // Do a crazy shuffle so that we get:
+ // v------------ Multiply Add
+ // BG: a b c d
+ // A0: a b c d
+ __m128i bg_abcd = _mm_castps_si128(
+ _mm_shuffle_ps(
+ _mm_castsi128_ps(rgb_c_d),
+ _mm_castsi128_ps(rgb_a_b),
+ (3 << 6) | (1 << 4) | (3 << 2) | 1));
+ __m128i r_abcd = _mm_castps_si128(
+ _mm_shuffle_ps(
+ _mm_castsi128_ps(rgb_c_d),
+ _mm_castsi128_ps(rgb_a_b),
+ (2 << 6) | (2 << 2)));
+ __m128i y_abcd = _mm_add_epi32(bg_abcd, r_abcd);
+
+ // Shift back down to the 8-bit range.
+ __m128i y_offset = _mm_load_si128(
+ reinterpret_cast<const __m128i*>(kYOffset));
+ y_abcd = _mm_srai_epi32(y_abcd, FIX_SHIFT);
+ y_abcd = _mm_add_epi32(y_abcd, y_offset);
+ y_abcd = _mm_packs_epi32(y_abcd, y_abcd);
+ y_abcd = _mm_packus_epi16(y_abcd, y_abcd);
+ *reinterpret_cast<uint32*>(y_buf_1) = _mm_cvtsi128_si32(y_abcd);
+ y_buf_1 += 4;
+
+ // Second row 4 pixels.
+ __m128i rgb_row_2 = _mm_loadu_si128(
+ reinterpret_cast<const __m128i*>(rgb_buf_2));
+ __m128i zero_2 = _mm_xor_si128(rgb_row_2, rgb_row_2);
+ __m128i rgb_e_f = _mm_unpackhi_epi8(rgb_row_2, zero_2);
+ __m128i rgb_g_h = _mm_unpacklo_epi8(rgb_row_2, zero_2);
+
+ // Add two rows together.
+ __m128i rgb_ae_bf =
+ _mm_add_epi16(_mm_unpackhi_epi8(rgb_row_1, zero_2), rgb_e_f);
+ __m128i rgb_cg_dh =
+ _mm_add_epi16(_mm_unpacklo_epi8(rgb_row_1, zero_2), rgb_g_h);
+
+ // Multiply add like the previous row.
+ rgb_e_f = _mm_madd_epi16(rgb_e_f, y_table);
+ rgb_g_h = _mm_madd_epi16(rgb_g_h, y_table);
+
+ __m128i bg_efgh = _mm_castps_si128(
+ _mm_shuffle_ps(_mm_castsi128_ps(rgb_g_h),
+ _mm_castsi128_ps(rgb_e_f),
+ (3 << 6) | (1 << 4) | (3 << 2) | 1));
+ __m128i r_efgh = _mm_castps_si128(
+ _mm_shuffle_ps(_mm_castsi128_ps(rgb_g_h),
+ _mm_castsi128_ps(rgb_e_f),
+ (2 << 6) | (2 << 2)));
+ __m128i y_efgh = _mm_add_epi32(bg_efgh, r_efgh);
+ y_efgh = _mm_srai_epi32(y_efgh, FIX_SHIFT);
+ y_efgh = _mm_add_epi32(y_efgh, y_offset);
+ y_efgh = _mm_packs_epi32(y_efgh, y_efgh);
+ y_efgh = _mm_packus_epi16(y_efgh, y_efgh);
+ *reinterpret_cast<uint32*>(y_buf_2) = _mm_cvtsi128_si32(y_efgh);
+ y_buf_2 += 4;
+
+ __m128i rgb_ae_cg = _mm_castps_si128(
+ _mm_shuffle_ps(_mm_castsi128_ps(rgb_cg_dh),
+ _mm_castsi128_ps(rgb_ae_bf),
+ (3 << 6) | (2 << 4) | (3 << 2) | 2));
+ __m128i rgb_bf_dh = _mm_castps_si128(
+ _mm_shuffle_ps(_mm_castsi128_ps(rgb_cg_dh),
+ _mm_castsi128_ps(rgb_ae_bf),
+ (1 << 6) | (1 << 2)));
+
+ // This is a 2x2 subsampling for 2 pixels.
+ __m128i rgb_abef_cdgh = _mm_add_epi16(rgb_ae_cg, rgb_bf_dh);
+
+ // Do a multiply add with U table.
+ __m128i u_a_b = _mm_madd_epi16(
+ rgb_abef_cdgh,
+ _mm_load_si128(
+ reinterpret_cast<const __m128i*>(ConvertRGBAToYUV_kTable + 8)));
+ u_a_b = _mm_add_epi32(_mm_shuffle_epi32(u_a_b, ((3 << 2) | 1)),
+ _mm_shuffle_epi32(u_a_b, (2 << 2)));
+ // Shift right by 14: 12 bits from the fixed point plus 2 from the 2x2
+ // subsampling.
+ u_a_b = _mm_srai_epi32(u_a_b, FIX_SHIFT + 2);
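+ // Derive the chroma offset from kYOffset: 16 << 3 == 128.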
+ __m128i uv_offset = _mm_slli_epi32(y_offset, 3);
+ u_a_b = _mm_add_epi32(u_a_b, uv_offset);
+ u_a_b = _mm_packs_epi32(u_a_b, u_a_b);
+ u_a_b = _mm_packus_epi16(u_a_b, u_a_b);
+ *reinterpret_cast<uint16*>(u_buf) = _mm_extract_epi16(u_a_b, 0);
+ u_buf += 2;
+
+ __m128i v_a_b = _mm_madd_epi16(
+ rgb_abef_cdgh,
+ _mm_load_si128(
+ reinterpret_cast<const __m128i*>(ConvertRGBAToYUV_kTable + 16)));
+ v_a_b = _mm_add_epi32(_mm_shuffle_epi32(v_a_b, ((3 << 2) | 1)),
+ _mm_shuffle_epi32(v_a_b, (2 << 2)));
+ v_a_b = _mm_srai_epi32(v_a_b, FIX_SHIFT + 2);
+ v_a_b = _mm_add_epi32(v_a_b, uv_offset);
+ v_a_b = _mm_packs_epi32(v_a_b, v_a_b);
+ v_a_b = _mm_packus_epi16(v_a_b, v_a_b);
+ *reinterpret_cast<uint16*>(v_buf) = _mm_extract_epi16(v_a_b, 0);
+ v_buf += 2;
+
+ rgb_buf_1 += 16;
+ rgb_buf_2 += 16;
+
+ // Move forward by 4 pixels.
+ width -= 4;
+ }
+
+ // Just use C code to convert the remaining pixels.
+ if (width >= 2) {
+ ConvertRGBToYUV_V2H2(rgb_buf_1, rgb_buf_2, y_buf_1, y_buf_2, u_buf, v_buf);
+ rgb_buf_1 += 8;
+ rgb_buf_2 += 8;
+ y_buf_1 += 2;
+ y_buf_2 += 2;
+ ++u_buf;
+ ++v_buf;
+ width -= 2;
+ }
+
+ if (width)
+ ConvertRGBToYUV_V2H1(rgb_buf_1, rgb_buf_2, y_buf_1, y_buf_2, u_buf, v_buf);
+}
+
+extern void ConvertRGB32ToYUV_SSE2(const uint8* rgbframe,
+ uint8* yplane,
+ uint8* uplane,
+ uint8* vplane,
+ int width,
+ int height,
+ int rgbstride,
+ int ystride,
+ int uvstride) {
+ while (height >= 2) {
+ ConvertRGB32ToYUVRow_SSE2(rgbframe,
+ rgbframe + rgbstride,
+ yplane,
+ yplane + ystride,
+ uplane,
+ vplane,
+ width);
+ rgbframe += 2 * rgbstride;
+ yplane += 2 * ystride;
+ uplane += uvstride;
+ vplane += uvstride;
+ height -= 2;
+ }
+
+ if (!height)
+ return;
+
+ // Handle the last row.
+ while (width >= 2) {
+ ConvertRGBToYUV_V1H2(rgbframe, yplane, uplane, vplane);
+ rgbframe += 8;
+ yplane += 2;
+ ++uplane;
+ ++vplane;
+ width -= 2;
+ }
+
+ if (width)
+ ConvertRGBToYUV_V1H1(rgbframe, yplane, uplane, vplane);
+}
+
+void ConvertRGB32ToYUV_SSE2_Reference(const uint8* rgbframe,
+ uint8* yplane,
+ uint8* uplane,
+ uint8* vplane,
+ int width,
+ int height,
+ int rgbstride,
+ int ystride,
+ int uvstride) {
+ while (height >= 2) {
+ int i = 0;
+
+ // Convert a 2x2 block.
+ while (i + 2 <= width) {
+ ConvertRGBToYUV_V2H2(rgbframe + i * 4,
+ rgbframe + rgbstride + i * 4,
+ yplane + i,
+ yplane + ystride + i,
+ uplane + i / 2,
+ vplane + i / 2);
+ i += 2;
+ }
+
+ // Convert the last pixel of two rows.
+ if (i < width) {
+ ConvertRGBToYUV_V2H1(rgbframe + i * 4,
+ rgbframe + rgbstride + i * 4,
+ yplane + i,
+ yplane + ystride + i,
+ uplane + i / 2,
+ vplane + i / 2);
+ }
+
+ rgbframe += 2 * rgbstride;
+ yplane += 2 * ystride;
+ uplane += uvstride;
+ vplane += uvstride;
+ height -= 2;
+ }
+
+ if (!height)
+ return;
+
+ // Handle the last row.
+ while (width >= 2) {
+ ConvertRGBToYUV_V1H2(rgbframe, yplane, uplane, vplane);
+ rgbframe += 8;
+ yplane += 2;
+ ++uplane;
+ ++vplane;
+ width -= 2;
+ }
+
+ // Handle the last pixel in the last row.
+ if (width)
+ ConvertRGBToYUV_V1H1(rgbframe, yplane, uplane, vplane);
+}
+
+} // namespace media
diff --git a/chromium/media/base/simd/convert_rgb_to_yuv_ssse3.asm b/chromium/media/base/simd/convert_rgb_to_yuv_ssse3.asm
new file mode 100644
index 00000000000..ffbcbbcebd8
--- /dev/null
+++ b/chromium/media/base/simd/convert_rgb_to_yuv_ssse3.asm
@@ -0,0 +1,318 @@
+; Copyright (c) 2011 The Chromium Authors. All rights reserved.
+; Use of this source code is governed by a BSD-style license that can be
+; found in the LICENSE file.
+
+%include "media/base/simd/media_export.asm"
+%include "third_party/x86inc/x86inc.asm"
+
+;
+; This file uses SSE, SSE2, SSE3, and SSSE3, which are supported by all ATOM
+; processors.
+;
+ SECTION_TEXT
+ CPU SSE, SSE2, SSE3, SSSE3
+
+;
+; XMM registers representing constants. We must not use these registers as
+; destination operands.
+; for (int i = 0; i < 16; i += 4) {
+; xmm7.b[i] = 25; xmm7.b[i+1] = 2; xmm7.b[i+2] = 66; xmm7.b[i+3] = 0;
+; xmm6.b[i] = 0; xmm6.b[i+1] = 127; xmm6.b[i+2] = 0; xmm6.b[i+3] = 0;
+; xmm5.b[i] = 112; xmm5.b[i+1] = -74; xmm5.b[i+2] = -38; xmm5.b[i+3] = 0;
+; xmm4.b[i] = -18; xmm4.b[i+1] = -94; xmm4.b[i+2] = 112; xmm4.b[i+3] = 0;
+; }
+;
+%define XMM_CONST_Y0 xmm7
+%define XMM_CONST_Y1 xmm6
+%define XMM_CONST_U xmm5
+%define XMM_CONST_V xmm4
+%define XMM_CONST_128 xmm3
+
+;
+; LOAD_XMM %1 (xmm), %2 (imm32)
+; Loads an immediate value to an XMM register.
+; %1.d[0] = %1.d[1] = %1.d[2] = %1.d[3] = %2;
+;
+%macro LOAD_XMM 2
+ mov TEMPd, %2
+ movd %1, TEMPd
+ pshufd %1, %1, 00000000B
+%endmacro
+
+;
+; UNPACKRGB %1 (xmm), %2 (imm8)
+; Unpacks one RGB pixel in the specified XMM register.
+; for (int i = 15; i > %2; --i) %1.b[i] = %1.b[i - 1];
+; %1.b[%2] = 0;
+; for (int i = %2 - 1; i >= 0; --i) %1.b[i] = %1.b[i];
+;
+%macro UNPACKRGB 2
+ movdqa xmm1, %1
+ psrldq xmm1, %2
+ pslldq xmm1, %2
+ pxor %1, xmm1
+ pslldq xmm1, 1
+ por %1, xmm1
+%endmacro
+
+;
+; READ_ARGB %1 (xmm), %2 (imm)
+; Read the specified number of ARGB (or RGB) pixels from the source and store
+; them in the destination xmm register. If the input format is RGB, we read RGB
+; pixels and convert them to ARGB pixels. (For this case, the alpha values of
+; the output pixels become 0.)
+;
+%macro READ_ARGB 2
+
+%if PIXELSIZE == 4
+
+ ; Read ARGB pixels from the source. (This macro assumes the input buffer may
+ ; not be aligned to a 16-byte boundary.)
+%if %2 == 1
+ movd %1, DWORD [ARGBq + WIDTHq * 4 * 2]
+%elif %2 == 2
+ movq %1, QWORD [ARGBq + WIDTHq * 4 * 2]
+%elif %2 == 4
+ movdqu %1, DQWORD [ARGBq + WIDTHq * 4 * 2]
+%else
+%error unsupported number of pixels.
+%endif
+
+%elif PIXELSIZE == 3
+
+ ; Read RGB pixels from the source and convert them to ARGB pixels.
+%if %2 == 1
+ ; Read one RGB pixel and convert it to one ARGB pixel.
+ ; Save the WIDTH register to xmm1. (This macro clobbers it.)
+ MOVq xmm1, WIDTHq
+
+ ; Read three bytes from the source into TEMPd, then copy it to the
+ ; destination xmm register.
+ lea WIDTHq, [WIDTHq + WIDTHq * 2]
+ movzx TEMPd, BYTE [ARGBq + WIDTHq * 2 + 2]
+ shl TEMPd, 16
+ mov TEMPw, WORD [ARGBq + WIDTHq * 2]
+ movd %1, TEMPd
+
+ ; Restore the WIDTH register.
+ MOVq WIDTHq, xmm1
+%elif %2 == 2
+ ; Read two RGB pixels and convert them to two ARGB pixels.
+ ; Read six bytes from the source to the destination xmm register.
+ mov TEMPq, WIDTHq
+ lea TEMPq, [TEMPq + TEMPq * 2]
+ movd %1, DWORD [ARGBq + TEMPq * 2]
+ pinsrw %1, WORD [ARGBq + TEMPq * 2 + 4], 3
+
+ ; Fill the alpha values of these RGB pixels with 0 and convert them to two
+ ; ARGB pixels.
+ UNPACKRGB %1, 3
+%elif %2 == 4
+ ; Read four RGB pixels and convert them to four ARGB pixels.
+ ; Read twelve bytes from the source to the destination xmm register.
+ mov TEMPq, WIDTHq
+ lea TEMPq, [TEMPq + TEMPq * 2]
+ movq %1, QWORD [ARGBq + TEMPq * 2]
+ movd xmm1, DWORD [ARGBq + TEMPq * 2 + 8]
+ shufps %1, xmm1, 01000100B
+
+ ; Fill the alpha values of these RGB pixels with 0 and convert them to four
+ ; ARGB pixels.
+ UNPACKRGB %1, 3
+ UNPACKRGB %1, 4 + 3
+ UNPACKRGB %1, 4 + 4 + 3
+%else
+%error unsupported number of pixels.
+%endif
+
+%else
+%error unsupported PIXELSIZE value.
+%endif
+
+%endmacro
+
+;
+; CALC_Y %1 (xmm), %2 (xmm)
+; Calculates four Y values from four ARGB pixels stored in %2.
+; %1.b[0] = ToByte((25 * B(0) + 129 * G(0) + 66 * R(0) + 128) / 256 + 16);
+; %1.b[1] = ToByte((25 * B(1) + 129 * G(1) + 66 * R(1) + 128) / 256 + 16);
+; %1.b[2] = ToByte((25 * B(2) + 129 * G(2) + 66 * R(2) + 128) / 256 + 16);
+; %1.b[3] = ToByte((25 * B(3) + 129 * G(3) + 66 * R(3) + 128) / 256 + 16);
+;
+%macro CALC_Y 2
+ ; To avoid signed saturation, we divide this conversion formula into two
+ ; formulae and store their results into two XMM registers %1 and xmm2.
+ ; %1.w[0] = 25 * %2.b[0] + 2 * %2.b[1] + 66 * %2.b[2] + 0 * %2.b[3];
+ ; %1.w[1] = 25 * %2.b[4] + 2 * %2.b[5] + 66 * %2.b[6] + 0 * %2.b[7];
+ ; %1.w[2] = 25 * %2.b[8] + 2 * %2.b[9] + 66 * %2.b[10] + 0 * %2.b[11];
+ ; %1.w[3] = 25 * %2.b[12] + 2 * %2.b[13] + 66 * %2.b[14] + 0 * %2.b[15];
+ ; xmm2.w[0] = 0 * %2.b[0] + 127 * %2.b[1] + 0 * %2.b[2] + 0 * %2.b[3];
+ ; xmm2.w[1] = 0 * %2.b[4] + 127 * %2.b[5] + 0 * %2.b[6] + 0 * %2.b[7];
+ ; xmm2.w[2] = 0 * %2.b[8] + 127 * %2.b[9] + 0 * %2.b[10] + 0 * %2.b[11];
+ ; xmm2.w[3] = 0 * %2.b[12] + 127 * %2.b[13] + 0 * %2.b[14] + 0 * %2.b[15];
+ movdqa %1, %2
+ pmaddubsw %1, XMM_CONST_Y0
+ phaddsw %1, %1
+ movdqa xmm2, %2
+ pmaddubsw xmm2, XMM_CONST_Y1
+ phaddsw xmm2, xmm2
+
+ ; %1.b[0] = ToByte((%1.w[0] + xmm2.w[0] + 128) / 256 + 16);
+ ; %1.b[1] = ToByte((%1.w[1] + xmm2.w[1] + 128) / 256 + 16);
+ ; %1.b[2] = ToByte((%1.w[2] + xmm2.w[2] + 128) / 256 + 16);
+ ; %1.b[3] = ToByte((%1.w[3] + xmm2.w[3] + 128) / 256 + 16);
+ paddw %1, xmm2
+ movdqa xmm2, XMM_CONST_128
+ paddw %1, xmm2
+ psrlw %1, 8
+ psrlw xmm2, 3
+ paddw %1, xmm2
+ packuswb %1, %1
+%endmacro
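+
+; For reference, a scalar sketch of what CALC_Y computes for each pixel
+; (illustrative only):
+;   int y = (25 * B + (2 + 127) * G + 66 * R + 128) / 256 + 16;
+;   clamp y to [0, 255];  // packuswb saturates the packed result.
+; The G coefficient 129 is split into 2 + 127 because pmaddubsw treats its
+; constant operand as signed bytes, which cannot represent 129.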
+
+;
+; INIT_UV %1 (r32), %2 (reg), %3 (imm)
+; Loads the U (or V) value(s) stored by the previous (even) line when
+; converting an odd line with subsampling, so CALC_UV can average them in.
+;
+%macro INIT_UV 3
+
+%if SUBSAMPLING == 1 && LINE == 1
+%if %3 == 1 || %3 == 2
+ movzx %1, BYTE [%2 + WIDTHq]
+%elif %3 == 4
+ movzx %1, WORD [%2 + WIDTHq]
+%else
+%error unsupported number of pixels.
+%endif
+%endif
+
+%endmacro
+
+;
+; CALC_UV %1 (xmm), %2 (xmm), %3 (xmm), %4 (r32)
+; Calculates two U (or V) values from four ARGB pixels stored in %2.
+; if %3 == XMM_CONST_U
+; if (SUBSAMPLING) {
+; %1.b[0] = ToByte((112 * AVG(B(0), B(1)) - 74 * AVG(G(0), G(1)) -
+;                   38 * AVG(R(0), R(1)) + 128) / 256 + 128);
+; %1.b[1] = ToByte((112 * AVG(B(2), B(3)) - 74 * AVG(G(2), G(3)) -
+;                   38 * AVG(R(2), R(3)) + 128) / 256 + 128);
+; } else {
+; %1.b[0] = ToByte((112 * B(0) - 74 * G(0) - 38 * R(0) + 128) / 256 + 128);
+; %1.b[1] = ToByte((112 * B(2) - 74 * G(2) - 38 * R(2) + 128) / 256 + 128);
+; }
+; if %3 == XMM_CONST_V
+; %1.b[0] = ToByte((-18 * B(0) - 94 * G(0) + 112 * R(0) + 128) / 256 + 128);
+; %1.b[1] = ToByte((-18 * B(2) - 94 * G(2) + 112 * R(2) + 128) / 256 + 128);
+;
+%macro CALC_UV 4
+ ; for (int i = 0; i < 4; ++i) {
+ ; %1.w[i] = 0;
+ ; for (int j = 0; j < 4; ++j)
+ ; %1.w[i] += %2.b[i * 4 + j] * %3.b[i * 4 + j];
+ ; }
+ movdqa %1, %2
+ pmaddubsw %1, %3
+ phaddsw %1, %1
+
+%if SUBSAMPLING == 1
+ ; %1.w[0] = (%1.w[0] + %1.w[1] + 1) / 2;
+ ; %1.w[1] = (%1.w[1] + %1.w[0] + 1) / 2;
+ ; %1.w[2] = (%1.w[2] + %1.w[3] + 1) / 2;
+ ; %1.w[3] = (%1.w[3] + %1.w[2] + 1) / 2;
+ pshuflw xmm2, %1, 10110001B
+ pavgw %1, xmm2
+%endif
+
+ ; %1.b[0] = ToByte((%1.w[0] + 128) / 256 + 128);
+ ; %1.b[1] = ToByte((%1.w[2] + 128) / 256 + 128);
+ pshuflw %1, %1, 10001000B
+ paddw %1, XMM_CONST_128
+ psraw %1, 8
+ paddw %1, XMM_CONST_128
+ packuswb %1, %1
+
+%if SUBSAMPLING == 1 && LINE == 1
+ ; %1.b[0] = (%1.b[0] + %4.b[0] + 1) / 2;
+ ; %1.b[1] = (%1.b[1] + %4.b[1] + 1) / 2;
+ movd xmm2, %4
+ pavgb %1, xmm2
+%endif
+%endmacro
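+
+; For reference, the scalar forms these constants implement (illustrative
+; only):
+;   U = (112 * B - 74 * G - 38 * R + 128) / 256 + 128;
+;   V = (-18 * B - 94 * G + 112 * R + 128) / 256 + 128;
+; With SUBSAMPLING, pavgw first averages horizontally adjacent sums, and with
+; LINE == 1, pavgb also averages against the values stored by the previous
+; (even) line, approximating a 2x2 box filter for 4:2:0 chroma.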
+
+;
+; extern "C" void ConvertARGBToYUVRow_SSSE3(const uint8* argb,
+; uint8* y,
+; uint8* u,
+; uint8* v,
+; ptrdiff_t width);
+;
+%define SYMBOL ConvertARGBToYUVRow_SSSE3
+%define PIXELSIZE 4
+%define SUBSAMPLING 0
+%define LINE 0
+%include "convert_rgb_to_yuv_ssse3.inc"
+
+;
+; extern "C" void ConvertRGBToYUVRow_SSSE3(const uint8* rgb,
+; uint8* y,
+; uint8* u,
+; uint8* v,
+; ptrdiff_t width);
+;
+%define SYMBOL ConvertRGBToYUVRow_SSSE3
+%define PIXELSIZE 3
+%define SUBSAMPLING 0
+%define LINE 0
+%include "convert_rgb_to_yuv_ssse3.inc"
+
+;
+; extern "C" void ConvertARGBToYUVEven_SSSE3(const uint8* argb,
+; uint8* y,
+; uint8* u,
+; uint8* v,
+; ptrdiff_t width);
+;
+%define SYMBOL ConvertARGBToYUVEven_SSSE3
+%define PIXELSIZE 4
+%define SUBSAMPLING 1
+%define LINE 0
+%include "convert_rgb_to_yuv_ssse3.inc"
+
+;
+; extern "C" void ConvertARGBToYUVOdd_SSSE3(const uint8* argb,
+; uint8* y,
+; uint8* u,
+; uint8* v,
+; ptrdiff_t width);
+;
+%define SYMBOL ConvertARGBToYUVOdd_SSSE3
+%define PIXELSIZE 4
+%define SUBSAMPLING 1
+%define LINE 1
+%include "convert_rgb_to_yuv_ssse3.inc"
+
+;
+; extern "C" void ConvertRGBToYUVEven_SSSE3(const uint8* rgb,
+; uint8* y,
+; uint8* u,
+; uint8* v,
+; ptrdiff_t width);
+;
+%define SYMBOL ConvertRGBToYUVEven_SSSE3
+%define PIXELSIZE 3
+%define SUBSAMPLING 1
+%define LINE 0
+%include "convert_rgb_to_yuv_ssse3.inc"
+
+;
+; extern "C" void ConvertRGBToYUVOdd_SSSE3(const uint8* rgb,
+; uint8* y,
+; uint8* u,
+; uint8* v,
+; ptrdiff_t width);
+;
+%define SYMBOL ConvertRGBToYUVOdd_SSSE3
+%define PIXELSIZE 3
+%define SUBSAMPLING 1
+%define LINE 1
+%include "convert_rgb_to_yuv_ssse3.inc"
diff --git a/chromium/media/base/simd/convert_rgb_to_yuv_ssse3.cc b/chromium/media/base/simd/convert_rgb_to_yuv_ssse3.cc
new file mode 100644
index 00000000000..e956926a1f5
--- /dev/null
+++ b/chromium/media/base/simd/convert_rgb_to_yuv_ssse3.cc
@@ -0,0 +1,64 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/simd/convert_rgb_to_yuv.h"
+
+#include "build/build_config.h"
+#include "media/base/simd/convert_rgb_to_yuv_ssse3.h"
+
+namespace media {
+
+void ConvertRGB32ToYUV_SSSE3(const uint8* rgbframe,
+ uint8* yplane,
+ uint8* uplane,
+ uint8* vplane,
+ int width,
+ int height,
+ int rgbstride,
+ int ystride,
+ int uvstride) {
+ for (; height >= 2; height -= 2) {
+ ConvertARGBToYUVRow_SSSE3(rgbframe, yplane, uplane, vplane, width);
+ rgbframe += rgbstride;
+ yplane += ystride;
+
+ ConvertARGBToYUVRow_SSSE3(rgbframe, yplane, NULL, NULL, width);
+ rgbframe += rgbstride;
+ yplane += ystride;
+
+ uplane += uvstride;
+ vplane += uvstride;
+ }
+
+ if (height)
+ ConvertARGBToYUVRow_SSSE3(rgbframe, yplane, uplane, vplane, width);
+}
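+
+// A note on the loop above: each iteration emits two rows of Y but only one
+// row of U and V (4:2:0). The second call passes NULL for u and v, so it
+// fills only yplane and chroma is sampled from the even rows.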
+
+void ConvertRGB24ToYUV_SSSE3(const uint8* rgbframe,
+ uint8* yplane,
+ uint8* uplane,
+ uint8* vplane,
+ int width,
+ int height,
+ int rgbstride,
+ int ystride,
+ int uvstride) {
+ for (; height >= 2; height -= 2) {
+ ConvertRGBToYUVRow_SSSE3(rgbframe, yplane, uplane, vplane, width);
+ rgbframe += rgbstride;
+ yplane += ystride;
+
+ ConvertRGBToYUVRow_SSSE3(rgbframe, yplane, NULL, NULL, width);
+ rgbframe += rgbstride;
+ yplane += ystride;
+
+ uplane += uvstride;
+ vplane += uvstride;
+ }
+
+ if (height)
+ ConvertRGBToYUVRow_SSSE3(rgbframe, yplane, uplane, vplane, width);
+}
+
+} // namespace media
diff --git a/chromium/media/base/simd/convert_rgb_to_yuv_ssse3.h b/chromium/media/base/simd/convert_rgb_to_yuv_ssse3.h
new file mode 100644
index 00000000000..92144c9aaca
--- /dev/null
+++ b/chromium/media/base/simd/convert_rgb_to_yuv_ssse3.h
@@ -0,0 +1,40 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_SIMD_CONVERT_RGB_TO_YUV_SSSE3_H_
+#define MEDIA_BASE_SIMD_CONVERT_RGB_TO_YUV_SSSE3_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// The header file for ASM functions that convert a row of RGB pixels with SSSE3
+// instructions so we can call them from C++ code. These functions are
+// implemented in "convert_rgb_to_yuv_ssse3.asm".
+
+// We use ptrdiff_t instead of int for yasm routine parameters to portably
+// sign-extend int. On Win64, MSVC does not sign-extend the value in the stack
+// home of int function parameters, and yasm routines are unaware of this lack
+// of extension and fault. ptrdiff_t is portably sign-extended and fixes this
+// issue on at least Win64.
+
+// Convert a row of 24-bit RGB pixels to YV12 pixels.
+void ConvertRGBToYUVRow_SSSE3(const uint8* rgb,
+ uint8* y,
+ uint8* u,
+ uint8* v,
+ ptrdiff_t width);
+
+// Convert a row of 32-bit RGB pixels to YV12 pixels.
+void ConvertARGBToYUVRow_SSSE3(const uint8* argb,
+ uint8* y,
+ uint8* u,
+ uint8* v,
+ ptrdiff_t width);
+
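+// Example (illustrative only): converting a single row of 8 BGRA pixels.
+//   uint8 argb[8 * 4], y[8], u[4], v[4];
+//   ConvertARGBToYUVRow_SSSE3(argb, y, u, v, 8);
+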
+#ifdef __cplusplus
+}
+#endif
+
+#endif // MEDIA_BASE_SIMD_CONVERT_RGB_TO_YUV_SSSE3_H_
diff --git a/chromium/media/base/simd/convert_rgb_to_yuv_ssse3.inc b/chromium/media/base/simd/convert_rgb_to_yuv_ssse3.inc
new file mode 100644
index 00000000000..e49e922a0f5
--- /dev/null
+++ b/chromium/media/base/simd/convert_rgb_to_yuv_ssse3.inc
@@ -0,0 +1,200 @@
+; Copyright (c) 2011 The Chromium Authors. All rights reserved.
+; Use of this source code is governed by a BSD-style license that can be
+; found in the LICENSE file.
+
+;
+; void SYMBOL(const uint8* argb, uint8* y, uint8* u, uint8* v,
+;             ptrdiff_t width);
+;
+; The main code that converts RGB pixels to YUV pixels. This function roughly
+; consists of three parts: converting one ARGB pixel to YUV pixels, converting
+; two ARGB pixels to YUV pixels, and converting four ARGB pixels to YUV pixels.
+; Written in C, the structure of this function roughly matches the snippet
+; listed below.
+;
+; if (width & 1) {
+; --width;
+; // Convert one ARGB pixel to one Y pixel, one U pixel, and one V pixel.
+; }
+;
+; if (width & 2) {
+; width -= 2;
+; // Convert two ARGB pixels to two Y pixels, one U pixel, and one V pixel.
+; }
+;
+; while (width) {
+; width -= 4;
+; // Convert four ARGB pixels to four Y pixels, two U pixels, and two V
+; // pixels.
+; }
+;
+ EXPORT SYMBOL
+ align function_align
+
+mangle(SYMBOL):
+ %assign stack_offset 0
+ PROLOGUE 5, 6, 8, ARGB, Y, U, V, WIDTH, TEMP
+
+ ; Initialize constants used in this function. (We use immediates to avoid
+ ; a dependency on the GOT.)
+ LOAD_XMM XMM_CONST_Y0, 0x00420219
+ LOAD_XMM XMM_CONST_Y1, 0x00007F00
+ LOAD_XMM XMM_CONST_U, 0x00DAB670
+ LOAD_XMM XMM_CONST_V, 0x0070A2EE
+ LOAD_XMM XMM_CONST_128, 0x00800080
+
+.convert_one_pixel:
+ ; Divide the input width by two so it represents the offsets for u[] and v[].
+ ; When the width is odd, we read the rightmost ARGB pixel and convert its
+ ; colorspace to YUV. This code stores one Y pixel, one U pixel, and one V
+ ; pixel.
+ sar WIDTHq, 1
+ jnc .convert_two_pixels
+
+ ; Read one ARGB (or RGB) pixel.
+ READ_ARGB xmm0, 1
+
+ ; Calculate y[0] from one RGB pixel read above.
+ CALC_Y xmm1, xmm0
+ movd TEMPd, xmm1
+ mov BYTE [Yq + WIDTHq * 2], TEMPb
+
+ ; Calculate u[0] from the RGB pixel read above. If this is an odd line, the
+ ; output pixel holds the U value calculated in the previous call; we read
+ ; that value and average the two.
+ INIT_UV TEMPd, Uq, 4
+ CALC_UV xmm1, xmm0, XMM_CONST_U, TEMPd
+ movd TEMPd, xmm1
+ mov BYTE [Uq + WIDTHq], TEMPb
+
+ ; Calculate v[0] from the same RGB pixel. As with u[0], we read the result
+ ; of the previous call and average the two.
+ INIT_UV TEMPd, Vq, 4
+ CALC_UV xmm1, xmm0, XMM_CONST_V, TEMPd
+ movd TEMPd, xmm1
+ mov BYTE [Vq + WIDTHq], TEMPb
+
+.convert_two_pixels:
+ ; If the input width is not a multiple of four, read the rightmost two ARGB
+ ; pixels and convert their colorspace to YUV. This code stores two Y pixels,
+ ; one U pixel, and one V pixel.
+ test WIDTHb, 2 / 2
+ jz .convert_four_pixels
+ sub WIDTHb, 2 / 2
+
+ ; Read two ARGB (or RGB) pixels.
+ READ_ARGB xmm0, 2
+
+ ; Calculate y[0] and y[1] from the two RGB pixels read above.
+ CALC_Y xmm1, xmm0
+ movd TEMPd, xmm1
+ mov WORD [Yq + WIDTHq * 2], TEMPw
+
+ ; Skip calculating u and v if the output buffer is NULL.
+ test Uq, Uq
+ jz .convert_four_pixels
+
+ ; Calculate u[0] from two RGB pixels read above. (For details, read the above
+ ; comment in .convert_one_pixel).
+ INIT_UV TEMPd, Uq, 2
+ CALC_UV xmm1, xmm0, XMM_CONST_U, TEMPd
+ movd TEMPd, xmm1
+ mov BYTE [Uq + WIDTHq], TEMPb
+
+ ; Calculate v[0] from two RGB pixels read above.
+ INIT_UV TEMPd, Vq, 2
+ CALC_UV xmm1, xmm0, XMM_CONST_V, TEMPd
+ movd TEMPd, xmm1
+ mov BYTE [Vq + WIDTHq], TEMPb
+
+.convert_four_pixels:
+ ; Read four ARGB pixels and convert their colorspace to YUV. This code stores
+ ; four Y pixels, two U pixels, and two V pixels.
+ test WIDTHq, WIDTHq
+ jz .convert_finish
+
+%if PIXELSIZE == 4
+ ; Check if the input buffer is aligned to a 16-byte boundary and use movdqa
+ ; for reading the ARGB pixels.
+ test ARGBw, 15
+ jnz .convert_four_pixels_unaligned
+
+.convert_four_pixels_aligned:
+ sub WIDTHq, 4 / 2
+
+ ; Read four ARGB pixels. (We can use movdqa here since we have checked that
+ ; the source address is aligned.)
+ movdqa xmm0, DQWORD [ARGBq + WIDTHq * 4 * 2]
+
+ ; Calculate y[0], y[1], y[2], and y[3] from the input ARGB pixels.
+ CALC_Y xmm1, xmm0
+ movd DWORD [Yq + WIDTHq * 2], xmm1
+
+%if SUBSAMPLING == 0
+ ; Skip calculating u and v if the output buffer is NULL, which means we are
+ ; converting an odd line. (When subsampling is enabled, these buffers must
+ ; contain the u and v values from the previous call, i.e. these variables must
+ ; not be NULL.)
+ test Uq, Uq
+ jz .convert_four_pixels_aligned_next
+%endif
+
+ ; Calculate u[0] and u[1] from four ARGB pixels read above.
+ INIT_UV TEMPd, Uq, 4
+ CALC_UV xmm1, xmm0, XMM_CONST_U, TEMPd
+ movd TEMPd, xmm1
+ mov WORD [Uq + WIDTHq], TEMPw
+
+ ; Calculate v[0] and v[1] from four ARGB pixels read above.
+ INIT_UV TEMPd, Vq, 4
+ CALC_UV xmm1, xmm0, XMM_CONST_V, TEMPd
+ movd TEMPd, xmm1
+ mov WORD [Vq + WIDTHq], TEMPw
+
+%if SUBSAMPLING == 0
+.convert_four_pixels_aligned_next:
+%endif
+
+ test WIDTHq, WIDTHq
+ jnz .convert_four_pixels_aligned
+
+ jmp .convert_finish
+%endif
+
+.convert_four_pixels_unaligned:
+ sub WIDTHq, 4 / 2
+
+ ; Read four ARGB (or RGB) pixels.
+ READ_ARGB xmm0, 4
+
+ ; Calculate y[0], y[1], y[2], and y[3] from the input ARGB pixels.
+ CALC_Y xmm1, xmm0
+ movd DWORD [Yq + WIDTHq * 2], xmm1
+
+%if SUBSAMPLING == 0
+ ; Skip calculating u and v if the output buffer is NULL.
+ test Uq, Uq
+ jz .convert_four_pixels_unaligned_next
+%endif
+
+ ; Calculate u[0] and u[1] from the input ARGB pixels.
+ INIT_UV TEMPd, Uq, 4
+ CALC_UV xmm1, xmm0, XMM_CONST_U, TEMPd
+ movd TEMPd, xmm1
+ mov WORD [Uq + WIDTHq], TEMPw
+
+ ; Calculate v[0] and v[1] from the input ARGB pixels.
+ INIT_UV TEMPd, Vq, 4
+ CALC_UV xmm1, xmm0, XMM_CONST_V, TEMPd
+ movd TEMPd, xmm1
+ mov WORD [Vq + WIDTHq], TEMPw
+
+%if SUBSAMPLING == 0
+.convert_four_pixels_unaligned_next:
+%endif
+
+ test WIDTHq, WIDTHq
+ jnz .convert_four_pixels_unaligned
+
+.convert_finish:
+ ; Just exit this function since this is a void function.
+ RET
diff --git a/chromium/media/base/simd/convert_rgb_to_yuv_unittest.cc b/chromium/media/base/simd/convert_rgb_to_yuv_unittest.cc
new file mode 100644
index 00000000000..d8f8b9caadb
--- /dev/null
+++ b/chromium/media/base/simd/convert_rgb_to_yuv_unittest.cc
@@ -0,0 +1,107 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/cpu.h"
+#include "base/memory/scoped_ptr.h"
+#include "media/base/simd/convert_rgb_to_yuv.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+// Reference code that converts RGB pixels to YUV pixels.
+int ConvertRGBToY(const uint8* rgb) {
+ int y = 25 * rgb[0] + 129 * rgb[1] + 66 * rgb[2];
+ y = ((y + 128) >> 8) + 16;
+ return std::max(0, std::min(255, y));
+}
+
+int ConvertRGBToU(const uint8* rgb, int size) {
+ int u = 112 * rgb[0] - 74 * rgb[1] - 38 * rgb[2];
+ u = ((u + 128) >> 8) + 128;
+ return std::max(0, std::min(255, u));
+}
+
+int ConvertRGBToV(const uint8* rgb, int size) {
+ int v = -18 * rgb[0] - 94 * rgb[1] + 112 * rgb[2];
+ v = ((v + 128) >> 8) + 128;
+ return std::max(0, std::min(255, v));
+}
+
+} // namespace
+
+// A side-by-side test that verifies that our ASM functions for converting
+// RGB pixels to YUV pixels produce the expected results. This test converts
+// RGB pixels to YUV pixels with our ASM functions (which use SSE, SSE2,
+// SSE3, and SSSE3) and compares the output YUV pixels with the ones
+// calculated by our reference functions implemented in C++.
+TEST(YUVConvertTest, SideBySideRGB) {
+ // Skip this test on machines that do not support SSSE3, since the
+ // functions under test require it.
+ base::CPU cpu;
+ if (!cpu.has_ssse3())
+ return;
+
+ // This test checks a subset of all RGB values so that it does not take
+ // too long.
+ const int kStep = 8;
+ const int kWidth = 256 / kStep;
+
+ for (int size = 3; size <= 4; ++size) {
+ // Create the output buffers.
+ scoped_ptr<uint8[]> rgb(new uint8[kWidth * size]);
+ scoped_ptr<uint8[]> y(new uint8[kWidth]);
+ scoped_ptr<uint8[]> u(new uint8[kWidth / 2]);
+ scoped_ptr<uint8[]> v(new uint8[kWidth / 2]);
+
+ // Choose the function that converts from RGB pixels to YUV ones.
+ void (*convert)(const uint8*, uint8*, uint8*, uint8*,
+ int, int, int, int, int) = NULL;
+ if (size == 3)
+ convert = media::ConvertRGB24ToYUV_SSSE3;
+ else
+ convert = media::ConvertRGB32ToYUV_SSSE3;
+
+ int total_error = 0;
+ for (int r = 0; r < kWidth; ++r) {
+ for (int g = 0; g < kWidth; ++g) {
+
+ // Fill the input pixels.
+ for (int b = 0; b < kWidth; ++b) {
+ rgb[b * size + 0] = b * kStep;
+ rgb[b * size + 1] = g * kStep;
+ rgb[b * size + 2] = r * kStep;
+ if (size == 4)
+ rgb[b * size + 3] = 255;
+ }
+
+ // Convert the input RGB pixels to YUV ones.
+ convert(rgb.get(), y.get(), u.get(), v.get(), kWidth, 1, kWidth * size,
+ kWidth, kWidth / 2);
+
+ // Check the output Y pixels.
+ for (int i = 0; i < kWidth; ++i) {
+ const uint8* p = &rgb[i * size];
+ int error = ConvertRGBToY(p) - y[i];
+ total_error += error > 0 ? error : -error;
+ }
+
+ // Check the output U pixels.
+ for (int i = 0; i < kWidth / 2; ++i) {
+ const uint8* p = &rgb[i * 2 * size];
+ int error = ConvertRGBToU(p, size) - u[i];
+ total_error += error > 0 ? error : -error;
+ }
+
+ // Check the output V pixels.
+ for (int i = 0; i < kWidth / 2; ++i) {
+ const uint8* p = &rgb[i * 2 * size];
+ int error = ConvertRGBToV(p, size) - v[i];
+ total_error += error > 0 ? error : -error;
+ }
+ }
+ }
+
+ EXPECT_EQ(0, total_error);
+ }
+}
diff --git a/chromium/media/base/simd/convert_yuv_to_rgb.h b/chromium/media/base/simd/convert_yuv_to_rgb.h
new file mode 100644
index 00000000000..2991d562af0
--- /dev/null
+++ b/chromium/media/base/simd/convert_yuv_to_rgb.h
@@ -0,0 +1,185 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_SIMD_CONVERT_YUV_TO_RGB_H_
+#define MEDIA_BASE_SIMD_CONVERT_YUV_TO_RGB_H_
+
+#include "base/basictypes.h"
+#include "media/base/yuv_convert.h"
+
+namespace media {
+
+// These methods are exported for testing purposes only. Library users should
+// only call the methods listed in yuv_convert.h.
+
+MEDIA_EXPORT void ConvertYUVToRGB32_C(const uint8* yplane,
+ const uint8* uplane,
+ const uint8* vplane,
+ uint8* rgbframe,
+ int width,
+ int height,
+ int ystride,
+ int uvstride,
+ int rgbstride,
+ YUVType yuv_type);
+
+MEDIA_EXPORT void ConvertYUVToRGB32Row_C(const uint8* yplane,
+ const uint8* uplane,
+ const uint8* vplane,
+ uint8* rgbframe,
+ ptrdiff_t width);
+
+MEDIA_EXPORT void ConvertYUVAToARGB_C(const uint8* yplane,
+ const uint8* uplane,
+ const uint8* vplane,
+ const uint8* aplane,
+ uint8* rgbframe,
+ int width,
+ int height,
+ int ystride,
+ int uvstride,
+ int avstride,
+ int rgbstride,
+ YUVType yuv_type);
+
+MEDIA_EXPORT void ConvertYUVAToARGBRow_C(const uint8* yplane,
+ const uint8* uplane,
+ const uint8* vplane,
+ const uint8* aplane,
+ uint8* rgbframe,
+ ptrdiff_t width);
+
+MEDIA_EXPORT void ConvertYUVToRGB32_SSE(const uint8* yplane,
+ const uint8* uplane,
+ const uint8* vplane,
+ uint8* rgbframe,
+ int width,
+ int height,
+ int ystride,
+ int uvstride,
+ int rgbstride,
+ YUVType yuv_type);
+
+MEDIA_EXPORT void ConvertYUVToRGB32_MMX(const uint8* yplane,
+ const uint8* uplane,
+ const uint8* vplane,
+ uint8* rgbframe,
+ int width,
+ int height,
+ int ystride,
+ int uvstride,
+ int rgbstride,
+ YUVType yuv_type);
+
+MEDIA_EXPORT void ConvertYUVAToARGB_MMX(const uint8* yplane,
+ const uint8* uplane,
+ const uint8* vplane,
+ const uint8* aplane,
+ uint8* rgbframe,
+ int width,
+ int height,
+ int ystride,
+ int uvstride,
+ int avstride,
+ int rgbstride,
+ YUVType yuv_type);
+
+MEDIA_EXPORT void ScaleYUVToRGB32Row_C(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* rgb_buf,
+ ptrdiff_t width,
+ ptrdiff_t source_dx);
+
+MEDIA_EXPORT void LinearScaleYUVToRGB32Row_C(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* rgb_buf,
+ ptrdiff_t width,
+ ptrdiff_t source_dx);
+
+MEDIA_EXPORT void LinearScaleYUVToRGB32RowWithRange_C(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* rgb_buf,
+ int dest_width,
+ int source_x,
+ int source_dx);
+
+} // namespace media
+
+// Assembly functions are declared without namespace.
+extern "C" {
+
+// We use ptrdiff_t instead of int for yasm routine parameters to portably
+// sign-extend int. On Win64, MSVC does not sign-extend the value in the stack
+// home of int function parameters, and yasm routines are unaware of this lack
+// of extension and fault. ptrdiff_t is portably sign-extended and fixes this
+// issue on at least Win64. The C-equivalent RowProc versions' prototypes
+// include the same change to ptrdiff_t to reuse the typedefs.
+
+MEDIA_EXPORT void ConvertYUVToRGB32Row_MMX(const uint8* yplane,
+ const uint8* uplane,
+ const uint8* vplane,
+ uint8* rgbframe,
+ ptrdiff_t width);
+
+MEDIA_EXPORT void ConvertYUVAToARGBRow_MMX(const uint8* yplane,
+ const uint8* uplane,
+ const uint8* vplane,
+ const uint8* aplane,
+ uint8* rgbframe,
+ ptrdiff_t width);
+
+MEDIA_EXPORT void ConvertYUVToRGB32Row_SSE(const uint8* yplane,
+ const uint8* uplane,
+ const uint8* vplane,
+ uint8* rgbframe,
+ ptrdiff_t width);
+
+MEDIA_EXPORT void ScaleYUVToRGB32Row_MMX(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* rgb_buf,
+ ptrdiff_t width,
+ ptrdiff_t source_dx);
+
+MEDIA_EXPORT void ScaleYUVToRGB32Row_SSE(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* rgb_buf,
+ ptrdiff_t width,
+ ptrdiff_t source_dx);
+
+MEDIA_EXPORT void ScaleYUVToRGB32Row_SSE2_X64(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* rgb_buf,
+ ptrdiff_t width,
+ ptrdiff_t source_dx);
+
+MEDIA_EXPORT void LinearScaleYUVToRGB32Row_MMX(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* rgb_buf,
+ ptrdiff_t width,
+ ptrdiff_t source_dx);
+
+MEDIA_EXPORT void LinearScaleYUVToRGB32Row_SSE(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* rgb_buf,
+ ptrdiff_t width,
+ ptrdiff_t source_dx);
+
+MEDIA_EXPORT void LinearScaleYUVToRGB32Row_MMX_X64(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* rgb_buf,
+ ptrdiff_t width,
+ ptrdiff_t source_dx);
+
+} // extern "C"
+
+#endif // MEDIA_BASE_SIMD_CONVERT_YUV_TO_RGB_H_
diff --git a/chromium/media/base/simd/convert_yuv_to_rgb_c.cc b/chromium/media/base/simd/convert_yuv_to_rgb_c.cc
new file mode 100644
index 00000000000..b8ebd1eeb12
--- /dev/null
+++ b/chromium/media/base/simd/convert_yuv_to_rgb_c.cc
@@ -0,0 +1,257 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/simd/convert_yuv_to_rgb.h"
+#include "media/base/simd/yuv_to_rgb_table.h"
+
+namespace media {
+
+#define packuswb(x) ((x) < 0 ? 0 : ((x) > 255 ? 255 : (x)))
+#define paddsw(x, y) (((x) + (y)) < -32768 ? -32768 : \
+ (((x) + (y)) > 32767 ? 32767 : ((x) + (y))))
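+
+// These macros emulate the MMX/SSE2 instructions of the same names:
+// packuswb clamps a word to an unsigned byte and paddsw is a saturating
+// signed 16-bit add, so the row functions below closely track the SIMD
+// code paths.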
+
+// On Android, pixel layout is RGBA (see skia/include/core/SkColorPriv.h);
+// however, other Chrome platforms use BGRA (see skia/config/SkUserConfig.h).
+// Ideally, Android should not use the functions here due to performance
+// issues (http://crbug.com/249980).
+#if defined(OS_ANDROID)
+#define SK_R32_SHIFT 0
+#define SK_G32_SHIFT 8
+#define SK_B32_SHIFT 16
+#define SK_A32_SHIFT 24
+#else
+#define SK_B32_SHIFT 0
+#define SK_G32_SHIFT 8
+#define SK_R32_SHIFT 16
+#define SK_A32_SHIFT 24
+#endif
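+
+// For example (illustrative only): with the non-Android BGRA layout above,
+// b = 1, g = 2, r = 3, a = 255 packs into the uint32 0xFF030201, which a
+// little-endian store lays out in memory as B, G, R, A.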
+
+static inline void ConvertYUVToRGB32_C(uint8 y,
+ uint8 u,
+ uint8 v,
+ uint8* rgb_buf) {
+ int b = kCoefficientsRgbY[256+u][0];
+ int g = kCoefficientsRgbY[256+u][1];
+ int r = kCoefficientsRgbY[256+u][2];
+ int a = kCoefficientsRgbY[256+u][3];
+
+ b = paddsw(b, kCoefficientsRgbY[512+v][0]);
+ g = paddsw(g, kCoefficientsRgbY[512+v][1]);
+ r = paddsw(r, kCoefficientsRgbY[512+v][2]);
+ a = paddsw(a, kCoefficientsRgbY[512+v][3]);
+
+ b = paddsw(b, kCoefficientsRgbY[y][0]);
+ g = paddsw(g, kCoefficientsRgbY[y][1]);
+ r = paddsw(r, kCoefficientsRgbY[y][2]);
+ a = paddsw(a, kCoefficientsRgbY[y][3]);
+
+ b >>= 6;
+ g >>= 6;
+ r >>= 6;
+ a >>= 6;
+
+ *reinterpret_cast<uint32*>(rgb_buf) = (packuswb(b) << SK_B32_SHIFT) |
+ (packuswb(g) << SK_G32_SHIFT) |
+ (packuswb(r) << SK_R32_SHIFT) |
+ (packuswb(a) << SK_A32_SHIFT);
+}
+
+static inline void ConvertYUVAToARGB_C(uint8 y,
+ uint8 u,
+ uint8 v,
+ uint8 a,
+ uint8* rgb_buf) {
+ int b = kCoefficientsRgbY[256+u][0];
+ int g = kCoefficientsRgbY[256+u][1];
+ int r = kCoefficientsRgbY[256+u][2];
+
+ b = paddsw(b, kCoefficientsRgbY[512+v][0]);
+ g = paddsw(g, kCoefficientsRgbY[512+v][1]);
+ r = paddsw(r, kCoefficientsRgbY[512+v][2]);
+
+ b = paddsw(b, kCoefficientsRgbY[y][0]);
+ g = paddsw(g, kCoefficientsRgbY[y][1]);
+ r = paddsw(r, kCoefficientsRgbY[y][2]);
+
+ b >>= 6;
+ g >>= 6;
+ r >>= 6;
+
+ b = packuswb(b) * a >> 8;
+ g = packuswb(g) * a >> 8;
+ r = packuswb(r) * a >> 8;
+
+ *reinterpret_cast<uint32*>(rgb_buf) = (b << SK_B32_SHIFT) |
+ (g << SK_G32_SHIFT) |
+ (r << SK_R32_SHIFT) |
+ (a << SK_A32_SHIFT);
+}
+
+void ConvertYUVToRGB32Row_C(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* rgb_buf,
+ ptrdiff_t width) {
+ for (int x = 0; x < width; x += 2) {
+ uint8 u = u_buf[x >> 1];
+ uint8 v = v_buf[x >> 1];
+ uint8 y0 = y_buf[x];
+ ConvertYUVToRGB32_C(y0, u, v, rgb_buf);
+ if ((x + 1) < width) {
+ uint8 y1 = y_buf[x + 1];
+ ConvertYUVToRGB32_C(y1, u, v, rgb_buf + 4);
+ }
+ rgb_buf += 8; // Advance 2 pixels.
+ }
+}
+
+void ConvertYUVAToARGBRow_C(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ const uint8* a_buf,
+ uint8* rgba_buf,
+ ptrdiff_t width) {
+ for (int x = 0; x < width; x += 2) {
+ uint8 u = u_buf[x >> 1];
+ uint8 v = v_buf[x >> 1];
+ uint8 y0 = y_buf[x];
+ uint8 a0 = a_buf[x];
+ ConvertYUVAToARGB_C(y0, u, v, a0, rgba_buf);
+ if ((x + 1) < width) {
+ uint8 y1 = y_buf[x + 1];
+ uint8 a1 = a_buf[x + 1];
+ ConvertYUVAToARGB_C(y1, u, v, a1, rgba_buf + 4);
+ }
+ rgba_buf += 8; // Advance 2 pixels.
+ }
+}
+
+// 16.16 fixed point is used. A shift by 16 isolates the integer.
+// A shift by 17 is used to further subsample the chrominance channels.
+// & 0xffff isolates the fixed point fraction. >> 2 to get the upper 2 bits,
+// for 1/65536 pixel accurate interpolation.
+void ScaleYUVToRGB32Row_C(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* rgb_buf,
+ ptrdiff_t width,
+ ptrdiff_t source_dx) {
+ int x = 0;
+ for (int i = 0; i < width; i += 2) {
+ int y = y_buf[x >> 16];
+ int u = u_buf[(x >> 17)];
+ int v = v_buf[(x >> 17)];
+ ConvertYUVToRGB32_C(y, u, v, rgb_buf);
+ x += source_dx;
+ if ((i + 1) < width) {
+ y = y_buf[x >> 16];
+ ConvertYUVToRGB32_C(y, u, v, rgb_buf+4);
+ x += source_dx;
+ }
+ rgb_buf += 8;
+ }
+}
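+
+// Worked example of the stepping above (illustrative only): source_dx of
+// 0x8000 advances half a source pixel per output pixel, i.e. a 2x upscale.
+// After four output pixels x == 0x20000, so y_buf[x >> 16] reads index 2
+// while u_buf[x >> 17] is still at index 1.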
+
+void LinearScaleYUVToRGB32Row_C(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* rgb_buf,
+ ptrdiff_t width,
+ ptrdiff_t source_dx) {
+ // Avoid point-sampling for down-scaling by > 2:1.
+ int source_x = 0;
+ if (source_dx >= 0x20000)
+ source_x += 0x8000;
+ LinearScaleYUVToRGB32RowWithRange_C(y_buf, u_buf, v_buf, rgb_buf, width,
+ source_x, source_dx);
+}
+
+void LinearScaleYUVToRGB32RowWithRange_C(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* rgb_buf,
+ int dest_width,
+ int x,
+ int source_dx) {
+ for (int i = 0; i < dest_width; i += 2) {
+ int y0 = y_buf[x >> 16];
+ int y1 = y_buf[(x >> 16) + 1];
+ int u0 = u_buf[(x >> 17)];
+ int u1 = u_buf[(x >> 17) + 1];
+ int v0 = v_buf[(x >> 17)];
+ int v1 = v_buf[(x >> 17) + 1];
+ int y_frac = (x & 65535);
+ int uv_frac = ((x >> 1) & 65535);
+ int y = (y_frac * y1 + (y_frac ^ 65535) * y0) >> 16;
+ int u = (uv_frac * u1 + (uv_frac ^ 65535) * u0) >> 16;
+ int v = (uv_frac * v1 + (uv_frac ^ 65535) * v0) >> 16;
+ ConvertYUVToRGB32_C(y, u, v, rgb_buf);
+ x += source_dx;
+ if ((i + 1) < dest_width) {
+ y0 = y_buf[x >> 16];
+ y1 = y_buf[(x >> 16) + 1];
+ y_frac = (x & 65535);
+ y = (y_frac * y1 + (y_frac ^ 65535) * y0) >> 16;
+ ConvertYUVToRGB32_C(y, u, v, rgb_buf+4);
+ x += source_dx;
+ }
+ rgb_buf += 8;
+ }
+}
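+
+// The blends above are linear interpolation in 16.16 fixed point: since
+// (y_frac ^ 65535) == 65535 - y_frac, each sum computes approximately
+// y0 + (y1 - y0) * y_frac / 65536, accurate to within a rounding bit.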
+
+void ConvertYUVToRGB32_C(const uint8* yplane,
+ const uint8* uplane,
+ const uint8* vplane,
+ uint8* rgbframe,
+ int width,
+ int height,
+ int ystride,
+ int uvstride,
+ int rgbstride,
+ YUVType yuv_type) {
+ unsigned int y_shift = yuv_type;
+ for (int y = 0; y < height; ++y) {
+ uint8* rgb_row = rgbframe + y * rgbstride;
+ const uint8* y_ptr = yplane + y * ystride;
+ const uint8* u_ptr = uplane + (y >> y_shift) * uvstride;
+ const uint8* v_ptr = vplane + (y >> y_shift) * uvstride;
+
+ ConvertYUVToRGB32Row_C(y_ptr,
+ u_ptr,
+ v_ptr,
+ rgb_row,
+ width);
+ }
+}
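+
+// A note on y_shift above: it relies on the numeric values of the YUVType
+// enum (assumed here to be YV16 == 0 and YV12 == 1), so YV12 input halves
+// the chroma row index while YV16 reads one chroma row per luma row.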
+
+void ConvertYUVAToARGB_C(const uint8* yplane,
+ const uint8* uplane,
+ const uint8* vplane,
+ const uint8* aplane,
+ uint8* rgbaframe,
+ int width,
+ int height,
+ int ystride,
+ int uvstride,
+ int astride,
+ int rgbastride,
+ YUVType yuv_type) {
+ unsigned int y_shift = yuv_type;
+ for (int y = 0; y < height; y++) {
+ uint8* rgba_row = rgbaframe + y * rgbastride;
+ const uint8* y_ptr = yplane + y * ystride;
+ const uint8* u_ptr = uplane + (y >> y_shift) * uvstride;
+ const uint8* v_ptr = vplane + (y >> y_shift) * uvstride;
+ const uint8* a_ptr = aplane + y * astride;
+
+ ConvertYUVAToARGBRow_C(y_ptr,
+ u_ptr,
+ v_ptr,
+ a_ptr,
+ rgba_row,
+ width);
+ }
+}
+
+} // namespace media
diff --git a/chromium/media/base/simd/convert_yuv_to_rgb_mmx.asm b/chromium/media/base/simd/convert_yuv_to_rgb_mmx.asm
new file mode 100644
index 00000000000..39a4f75697d
--- /dev/null
+++ b/chromium/media/base/simd/convert_yuv_to_rgb_mmx.asm
@@ -0,0 +1,22 @@
+; Copyright (c) 2011 The Chromium Authors. All rights reserved.
+; Use of this source code is governed by a BSD-style license that can be
+; found in the LICENSE file.
+
+%include "third_party/x86inc/x86inc.asm"
+
+;
+; This file uses MMX instructions.
+;
+ SECTION_TEXT
+ CPU MMX
+
+; Use movq to save the output.
+%define MOVQ movq
+
+; extern "C" void ConvertYUVToRGB32Row_MMX(const uint8* y_buf,
+; const uint8* u_buf,
+; const uint8* v_buf,
+; uint8* rgb_buf,
+; ptrdiff_t width);
+%define SYMBOL ConvertYUVToRGB32Row_MMX
+%include "convert_yuv_to_rgb_mmx.inc"
diff --git a/chromium/media/base/simd/convert_yuv_to_rgb_mmx.inc b/chromium/media/base/simd/convert_yuv_to_rgb_mmx.inc
new file mode 100644
index 00000000000..e38794a814c
--- /dev/null
+++ b/chromium/media/base/simd/convert_yuv_to_rgb_mmx.inc
@@ -0,0 +1,121 @@
+; Copyright (c) 2011 The Chromium Authors. All rights reserved.
+; Use of this source code is governed by a BSD-style license that can be
+; found in the LICENSE file.
+
+%include "media/base/simd/media_export.asm"
+
+ EXPORT SYMBOL
+ align function_align
+
+; Non-PIC code is the fastest, so use it when possible.
+%ifndef PIC
+mangle(SYMBOL):
+ %assign stack_offset 0
+ PROLOGUE 5, 7, 3, Y, U, V, ARGB, WIDTH, TEMPU, TEMPV
+ extern mangle(kCoefficientsRgbY)
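+
+ ; Layout of kCoefficientsRgbY, as assumed from the offsets used below: 256
+ ; 8-byte Y entries at offset 0, 256 U entries at 2048, and 256 V entries at
+ ; 4096. Each entry holds four 16-bit B, G, R, A terms that are summed and
+ ; then shifted right by 6.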
+ jmp .convertend
+
+.convertloop:
+ movzx TEMPUd, BYTE [Uq]
+ add Uq, 1
+ movzx TEMPVd, BYTE [Vq]
+ add Vq, 1
+ movq mm0, [mangle(kCoefficientsRgbY) + 2048 + 8 * TEMPUq]
+ movzx TEMPUd, BYTE [Yq]
+ paddsw mm0, [mangle(kCoefficientsRgbY) + 4096 + 8 * TEMPVq]
+ movzx TEMPVd, BYTE [Yq + 1]
+ movq mm1, [mangle(kCoefficientsRgbY) + 8 * TEMPUq]
+ add Yq, 2
+ movq mm2, [mangle(kCoefficientsRgbY) + 8 * TEMPVq]
+ paddsw mm1, mm0
+ paddsw mm2, mm0
+ psraw mm1, 6
+ psraw mm2, 6
+ packuswb mm1, mm2
+ MOVQ [ARGBq], mm1
+ add ARGBq, 8
+
+.convertend:
+ sub WIDTHq, 2
+ jns .convertloop
+
+ ; If the number of pixels is odd, convert the remaining pixel.
+ and WIDTHq, 1
+ jz .convertdone
+
+ movzx TEMPUd, BYTE [Uq]
+ movq mm0, [mangle(kCoefficientsRgbY) + 2048 + 8 * TEMPUq]
+ movzx TEMPVd, BYTE [Vq]
+ paddsw mm0, [mangle(kCoefficientsRgbY) + 4096 + 8 * TEMPVq]
+ movzx TEMPUd, BYTE [Yq]
+ movq mm1, [mangle(kCoefficientsRgbY) + 8 * TEMPUq]
+ paddsw mm1, mm0
+ psraw mm1, 6
+ packuswb mm1, mm1
+ movd [ARGBq], mm1
+
+.convertdone:
+ RET
+%endif
+
+; With PIC code we need to load the address of mangle(kCoefficientsRgbY).
+; This code is slower than the above version.
+%ifdef PIC
+mangle(SYMBOL):
+ %assign stack_offset 0
+ PROLOGUE 5, 7, 3, Y, U, V, ARGB, WIDTH, TEMP, TABLE
+
+ extern mangle(kCoefficientsRgbY)
+ LOAD_SYM TABLEq, mangle(kCoefficientsRgbY)
+
+ jmp .convertend
+
+.convertloop:
+ movzx TEMPd, BYTE [Uq]
+ movq mm0, [TABLEq + 2048 + 8 * TEMPq]
+ add Uq, 1
+
+ movzx TEMPd, BYTE [Vq]
+ paddsw mm0, [TABLEq + 4096 + 8 * TEMPq]
+ add Vq, 1
+
+ movzx TEMPd, BYTE [Yq]
+ movq mm1, [TABLEq + 8 * TEMPq]
+
+ movzx TEMPd, BYTE [Yq + 1]
+ movq mm2, [TABLEq + 8 * TEMPq]
+ add Yq, 2
+
+ ; Add UV components to Y component.
+ paddsw mm1, mm0
+ paddsw mm2, mm0
+
+ ; Down shift and then pack.
+ psraw mm1, 6
+ psraw mm2, 6
+ packuswb mm1, mm2
+ MOVQ [ARGBq], mm1
+ add ARGBq, 8
+
+.convertend:
+ sub WIDTHq, 2
+ jns .convertloop
+
+ ; If the number of pixels is odd, convert the remaining pixel.
+ and WIDTHq, 1
+ jz .convertdone
+
+ movzx TEMPd, BYTE [Uq]
+ movq mm0, [TABLEq + 2048 + 8 * TEMPq]
+ movzx TEMPd, BYTE [Vq]
+ paddsw mm0, [TABLEq + 4096 + 8 * TEMPq]
+ movzx TEMPd, BYTE [Yq]
+ movq mm1, [TABLEq + 8 * TEMPq]
+ paddsw mm1, mm0
+ psraw mm1, 6
+ packuswb mm1, mm1
+ movd [ARGBq], mm1
+
+.convertdone:
+ RET
+%endif
diff --git a/chromium/media/base/simd/convert_yuv_to_rgb_sse.asm b/chromium/media/base/simd/convert_yuv_to_rgb_sse.asm
new file mode 100644
index 00000000000..8b3ee582ad2
--- /dev/null
+++ b/chromium/media/base/simd/convert_yuv_to_rgb_sse.asm
@@ -0,0 +1,23 @@
+; Copyright (c) 2011 The Chromium Authors. All rights reserved.
+; Use of this source code is governed by a BSD-style license that can be
+; found in the LICENSE file.
+
+%include "third_party/x86inc/x86inc.asm"
+
+;
+; This file uses MMX and SSE instructions.
+;
+ SECTION_TEXT
+ CPU MMX, SSE
+
+; The SSE instruction movntq writes the output faster.
+%define MOVQ movntq
+
+;
+; extern "C" void ConvertYUVToRGB32Row_SSE(const uint8* y_buf,
+; const uint8* u_buf,
+; const uint8* v_buf,
+; uint8* rgb_buf,
+; ptrdiff_t width);
+%define SYMBOL ConvertYUVToRGB32Row_SSE
+%include "convert_yuv_to_rgb_mmx.inc"
diff --git a/chromium/media/base/simd/convert_yuv_to_rgb_x86.cc b/chromium/media/base/simd/convert_yuv_to_rgb_x86.cc
new file mode 100644
index 00000000000..d1d6e16beb7
--- /dev/null
+++ b/chromium/media/base/simd/convert_yuv_to_rgb_x86.cc
@@ -0,0 +1,101 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if defined(_MSC_VER)
+#include <intrin.h>
+#else
+#include <mmintrin.h>
+#endif
+
+#include "media/base/simd/convert_yuv_to_rgb.h"
+#include "media/base/yuv_convert.h"
+
+namespace media {
+
+void ConvertYUVToRGB32_MMX(const uint8* yplane,
+ const uint8* uplane,
+ const uint8* vplane,
+ uint8* rgbframe,
+ int width,
+ int height,
+ int ystride,
+ int uvstride,
+ int rgbstride,
+ YUVType yuv_type) {
+ unsigned int y_shift = yuv_type;
+ for (int y = 0; y < height; ++y) {
+ uint8* rgb_row = rgbframe + y * rgbstride;
+ const uint8* y_ptr = yplane + y * ystride;
+ const uint8* u_ptr = uplane + (y >> y_shift) * uvstride;
+ const uint8* v_ptr = vplane + (y >> y_shift) * uvstride;
+
+ ConvertYUVToRGB32Row_MMX(y_ptr,
+ u_ptr,
+ v_ptr,
+ rgb_row,
+ width);
+ }
+
+ EmptyRegisterState();
+}
+
+void ConvertYUVAToARGB_MMX(const uint8* yplane,
+ const uint8* uplane,
+ const uint8* vplane,
+ const uint8* aplane,
+ uint8* rgbframe,
+ int width,
+ int height,
+ int ystride,
+ int uvstride,
+ int astride,
+ int rgbstride,
+ YUVType yuv_type) {
+ unsigned int y_shift = yuv_type;
+ for (int y = 0; y < height; ++y) {
+ uint8* rgb_row = rgbframe + y * rgbstride;
+ const uint8* y_ptr = yplane + y * ystride;
+ const uint8* u_ptr = uplane + (y >> y_shift) * uvstride;
+ const uint8* v_ptr = vplane + (y >> y_shift) * uvstride;
+ const uint8* a_ptr = aplane + y * astride;
+
+ ConvertYUVAToARGBRow_MMX(y_ptr,
+ u_ptr,
+ v_ptr,
+ a_ptr,
+ rgb_row,
+ width);
+ }
+
+ EmptyRegisterState();
+}
+
+void ConvertYUVToRGB32_SSE(const uint8* yplane,
+ const uint8* uplane,
+ const uint8* vplane,
+ uint8* rgbframe,
+ int width,
+ int height,
+ int ystride,
+ int uvstride,
+ int rgbstride,
+ YUVType yuv_type) {
+ unsigned int y_shift = yuv_type;
+ for (int y = 0; y < height; ++y) {
+ uint8* rgb_row = rgbframe + y * rgbstride;
+ const uint8* y_ptr = yplane + y * ystride;
+ const uint8* u_ptr = uplane + (y >> y_shift) * uvstride;
+ const uint8* v_ptr = vplane + (y >> y_shift) * uvstride;
+
+ ConvertYUVToRGB32Row_SSE(y_ptr,
+ u_ptr,
+ v_ptr,
+ rgb_row,
+ width);
+ }
+
+ EmptyRegisterState();
+}
+
+} // namespace media
diff --git a/chromium/media/base/simd/convert_yuva_to_argb_mmx.asm b/chromium/media/base/simd/convert_yuva_to_argb_mmx.asm
new file mode 100644
index 00000000000..b39315dc461
--- /dev/null
+++ b/chromium/media/base/simd/convert_yuva_to_argb_mmx.asm
@@ -0,0 +1,23 @@
+; Copyright (c) 2011 The Chromium Authors. All rights reserved.
+; Use of this source code is governed by a BSD-style license that can be
+; found in the LICENSE file.
+
+%include "third_party/x86inc/x86inc.asm"
+
+;
+; This file uses MMX instructions.
+;
+ SECTION_TEXT
+ CPU MMX
+
+; Use movq to save the output.
+%define MOVQ movq
+
+; extern "C" void ConvertYUVAToARGBRow_MMX(const uint8* y_buf,
+; const uint8* u_buf,
+; const uint8* v_buf,
+; const uint8* a_buf,
+; uint8* rgb_buf,
+; ptrdiff_t width);
+%define SYMBOL ConvertYUVAToARGBRow_MMX
+%include "convert_yuva_to_argb_mmx.inc"
diff --git a/chromium/media/base/simd/convert_yuva_to_argb_mmx.inc b/chromium/media/base/simd/convert_yuva_to_argb_mmx.inc
new file mode 100644
index 00000000000..bcafb3807ba
--- /dev/null
+++ b/chromium/media/base/simd/convert_yuva_to_argb_mmx.inc
@@ -0,0 +1,176 @@
+; Copyright (c) 2011 The Chromium Authors. All rights reserved.
+; Use of this source code is governed by a BSD-style license that can be
+; found in the LICENSE file.
+
+%include "media/base/simd/media_export.asm"
+
+ EXPORT SYMBOL
+ align function_align
+
+; Non-PIC code is the fastest, so use it when possible.
+%ifndef PIC
+mangle(SYMBOL):
+ %assign stack_offset 0
+ PROLOGUE 6, 7, 3, Y, U, V, A, ARGB, WIDTH, TEMP
+ extern mangle(kCoefficientsRgbY)
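+
+ ; In addition to the Y/U/V entries at offsets 0/2048/4096, this routine
+ ; assumes a table of alpha multipliers at offset 6144, used below to scale
+ ; the packed BGRA result by the pixel's alpha.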
+ jmp .convertend
+
+.convertloop:
+ movzx TEMPd, BYTE [Uq]
+ movq mm0, [mangle(kCoefficientsRgbY) + 2048 + 8 * TEMPq]
+ add Uq, 1
+ movzx TEMPd, BYTE [Vq]
+ paddsw mm0, [mangle(kCoefficientsRgbY) + 4096 + 8 * TEMPq]
+ add Vq, 1
+ movzx TEMPd, BYTE [Yq]
+ movq mm1, [mangle(kCoefficientsRgbY) + 8 * TEMPq]
+ movzx TEMPd, BYTE [Yq + 1]
+ movq mm2, [mangle(kCoefficientsRgbY) + 8 * TEMPq]
+ add Yq, 2
+ paddsw mm1, mm0
+ paddsw mm2, mm0
+ psraw mm1, 6
+ psraw mm2, 6
+ packuswb mm1, mm2
+
+ ; Multiply ARGB by alpha value.
+ movq mm0, mm1
+ pxor mm2, mm2
+ punpcklbw mm0, mm2
+ punpckhbw mm1, mm2
+ movzx TEMPd, BYTE [Aq]
+ movq mm2, [mangle(kCoefficientsRgbY) + 6144 + 8 * TEMPq]
+ pmullw mm0, mm2
+ psrlw mm0, 8
+ movzx TEMPd, BYTE [Aq + 1]
+ movq mm2, [mangle(kCoefficientsRgbY) + 6144 + 8 * TEMPq]
+ add Aq, 2
+ pmullw mm1, mm2
+ psrlw mm1, 8
+ packuswb mm0, mm1
+
+ MOVQ [ARGBq], mm0
+ add ARGBq, 8
+
+.convertend:
+ sub WIDTHq, 2
+ jns .convertloop
+
+ ; If the number of pixels is odd, convert the remaining pixel.
+ and WIDTHq, 1
+ jz .convertdone
+
+ movzx TEMPd, BYTE [Uq]
+ movq mm0, [mangle(kCoefficientsRgbY) + 2048 + 8 * TEMPq]
+ movzx TEMPd, BYTE [Vq]
+ paddsw mm0, [mangle(kCoefficientsRgbY) + 4096 + 8 * TEMPq]
+ movzx TEMPd, BYTE [Yq]
+ movq mm1, [mangle(kCoefficientsRgbY) + 8 * TEMPq]
+ paddsw mm1, mm0
+ psraw mm1, 6
+ packuswb mm1, mm1
+
+ ; Multiply ARGB by alpha value.
+ pxor mm0, mm0
+ punpcklbw mm1, mm0
+ movzx TEMPd, BYTE [Aq]
+ movq mm0, [mangle(kCoefficientsRgbY) + 6144 + 8 * TEMPq]
+ pmullw mm1, mm0
+ psrlw mm1, 8
+ packuswb mm1, mm1
+
+ movd [ARGBq], mm1
+
+.convertdone:
+ RET
+%endif
+
+; With PIC code we need to load the address of mangle(kCoefficientsRgbY).
+; This code is slower than the above version.
+%ifdef PIC
+mangle(SYMBOL):
+ %assign stack_offset 0
+ PROLOGUE 6, 7, 3, Y, U, V, A, ARGB, WIDTH, TEMP
+ extern mangle(kCoefficientsRgbY)
+ PUSH WIDTHq
+ DEFINE_ARGS Y, U, V, A, ARGB, TABLE, TEMP
+ LOAD_SYM TABLEq, mangle(kCoefficientsRgbY)
+ jmp .convertend
+
+.convertloop:
+ movzx TEMPd, BYTE [Uq]
+ movq mm0, [TABLEq + 2048 + 8 * TEMPq]
+ add Uq, 1
+
+ movzx TEMPd, BYTE [Vq]
+ paddsw mm0, [TABLEq + 4096 + 8 * TEMPq]
+ add Vq, 1
+
+ movzx TEMPd, BYTE [Yq]
+ movq mm1, [TABLEq + 8 * TEMPq]
+
+ movzx TEMPd, BYTE [Yq + 1]
+ movq mm2, [TABLEq + 8 * TEMPq]
+ add Yq, 2
+
+ ; Add UV components to Y component.
+ paddsw mm1, mm0
+ paddsw mm2, mm0
+
+ ; Down shift and then pack.
+ psraw mm1, 6
+ psraw mm2, 6
+ packuswb mm1, mm2
+
+ ; Unpack and multiply by alpha value, then repack high bytes of words.
+ movq mm0, mm1
+ pxor mm2, mm2
+ punpcklbw mm0, mm2
+ punpckhbw mm1, mm2
+ movzx TEMPd, BYTE [Aq]
+ movq mm2, [TABLEq + 6144 + 8 * TEMPq]
+ pmullw mm0, mm2
+ psrlw mm0, 8
+ movzx TEMPd, BYTE [Aq + 1]
+ movq mm2, [TABLEq + 6144 + 8 * TEMPq]
+ add Aq, 2
+ pmullw mm1, mm2
+ psrlw mm1, 8
+ packuswb mm0, mm1
+
+ MOVQ [ARGBq], mm0
+ add ARGBq, 8
+
+.convertend:
+ sub dword [rsp], 2
+ jns .convertloop
+
+ ; If the number of pixels is odd, convert the remaining pixel.
+ and dword [rsp], 1
+ jz .convertdone
+
+ movzx TEMPd, BYTE [Uq]
+ movq mm0, [TABLEq + 2048 + 8 * TEMPq]
+ movzx TEMPd, BYTE [Vq]
+ paddsw mm0, [TABLEq + 4096 + 8 * TEMPq]
+ movzx TEMPd, BYTE [Yq]
+ movq mm1, [TABLEq + 8 * TEMPq]
+ paddsw mm1, mm0
+ psraw mm1, 6
+ packuswb mm1, mm1
+
+ ; Multiply ARGB by alpha value.
+ pxor mm0, mm0
+ punpcklbw mm1, mm0
+ movzx TEMPd, BYTE [Aq]
+ movq mm0, [TABLEq + 6144 + 8 * TEMPq]
+ pmullw mm1, mm0
+ psrlw mm1, 8
+ packuswb mm1, mm1
+
+ movd [ARGBq], mm1
+
+.convertdone:
+ POP TABLEq
+ RET
+%endif
\ No newline at end of file
diff --git a/chromium/media/base/simd/empty_register_state_mmx.asm b/chromium/media/base/simd/empty_register_state_mmx.asm
new file mode 100644
index 00000000000..d0028b51f1b
--- /dev/null
+++ b/chromium/media/base/simd/empty_register_state_mmx.asm
@@ -0,0 +1,24 @@
+; Copyright (c) 2013 The Chromium Authors. All rights reserved.
+; Use of this source code is governed by a BSD-style license that can be
+; found in the LICENSE file.
+
+%include "media/base/simd/media_export.asm"
+%include "third_party/x86inc/x86inc.asm"
+
+;
+; This file uses MMX instructions as an alternative to _mm_empty() which
+; is not supported in Visual Studio 2010 on x64.
+; TODO(wolenetz): Use MMX intrinsics when compiling win64 with Visual
+; Studio 2012? http://crbug.com/173450
+;
+ SECTION_TEXT
+ CPU MMX
+
+%define SYMBOL EmptyRegisterState_MMX
+ EXPORT SYMBOL
+ align function_align
+
+mangle(SYMBOL):
+ emms
+ ret
+
diff --git a/chromium/media/base/simd/filter_yuv.h b/chromium/media/base/simd/filter_yuv.h
new file mode 100644
index 00000000000..a656f897548
--- /dev/null
+++ b/chromium/media/base/simd/filter_yuv.h
@@ -0,0 +1,36 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_SIMD_FILTER_YUV_H_
+#define MEDIA_BASE_SIMD_FILTER_YUV_H_
+
+#include "base/basictypes.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+// These methods are exported for testing purposes only. Library users should
+// only call the methods listed in yuv_convert.h.
+
+MEDIA_EXPORT void FilterYUVRows_C(uint8* ybuf,
+ const uint8* y0_ptr,
+ const uint8* y1_ptr,
+ int source_width,
+ int source_y_fraction);
+
+MEDIA_EXPORT void FilterYUVRows_MMX(uint8* ybuf,
+ const uint8* y0_ptr,
+ const uint8* y1_ptr,
+ int source_width,
+ int source_y_fraction);
+
+MEDIA_EXPORT void FilterYUVRows_SSE2(uint8* ybuf,
+ const uint8* y0_ptr,
+ const uint8* y1_ptr,
+ int source_width,
+ int source_y_fraction);
+
+} // namespace media
+
+#endif // MEDIA_BASE_SIMD_FILTER_YUV_H_
diff --git a/chromium/media/base/simd/filter_yuv_c.cc b/chromium/media/base/simd/filter_yuv_c.cc
new file mode 100644
index 00000000000..f292d211352
--- /dev/null
+++ b/chromium/media/base/simd/filter_yuv_c.cc
@@ -0,0 +1,38 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/simd/filter_yuv.h"
+
+namespace media {
+
+void FilterYUVRows_C(uint8* ybuf, const uint8* y0_ptr, const uint8* y1_ptr,
+ int source_width, int source_y_fraction) {
+ int y1_fraction = source_y_fraction;
+ int y0_fraction = 256 - y1_fraction;
+ uint8* end = ybuf + source_width;
+ uint8* rounded_end = ybuf + (source_width & ~7);
+
+ while (ybuf < rounded_end) {
+ ybuf[0] = (y0_ptr[0] * y0_fraction + y1_ptr[0] * y1_fraction) >> 8;
+ ybuf[1] = (y0_ptr[1] * y0_fraction + y1_ptr[1] * y1_fraction) >> 8;
+ ybuf[2] = (y0_ptr[2] * y0_fraction + y1_ptr[2] * y1_fraction) >> 8;
+ ybuf[3] = (y0_ptr[3] * y0_fraction + y1_ptr[3] * y1_fraction) >> 8;
+ ybuf[4] = (y0_ptr[4] * y0_fraction + y1_ptr[4] * y1_fraction) >> 8;
+ ybuf[5] = (y0_ptr[5] * y0_fraction + y1_ptr[5] * y1_fraction) >> 8;
+ ybuf[6] = (y0_ptr[6] * y0_fraction + y1_ptr[6] * y1_fraction) >> 8;
+ ybuf[7] = (y0_ptr[7] * y0_fraction + y1_ptr[7] * y1_fraction) >> 8;
+ y0_ptr += 8;
+ y1_ptr += 8;
+ ybuf += 8;
+ }
+
+ while (ybuf < end) {
+ ybuf[0] = (y0_ptr[0] * y0_fraction + y1_ptr[0] * y1_fraction) >> 8;
+ ++ybuf;
+ ++y0_ptr;
+ ++y1_ptr;
+ }
+}
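+
+// Example (illustrative only): source_y_fraction == 64 blends 75% of y0
+// with 25% of y1, since y0_fraction becomes 256 - 64 == 192 and the >> 8
+// renormalizes each 8.8 fixed-point product.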
+
+} // namespace media
diff --git a/chromium/media/base/simd/filter_yuv_mmx.cc b/chromium/media/base/simd/filter_yuv_mmx.cc
new file mode 100644
index 00000000000..3991fe72fee
--- /dev/null
+++ b/chromium/media/base/simd/filter_yuv_mmx.cc
@@ -0,0 +1,79 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if defined(_MSC_VER)
+#include <intrin.h>
+#else
+#include <mmintrin.h>
+#endif
+
+#include "build/build_config.h"
+#include "media/base/simd/filter_yuv.h"
+
+namespace media {
+
+#if defined(COMPILER_MSVC)
+// Warning 4799 is about calling emms before the function exits.
+// We call emms at the frame level, so suppress this warning.
+#pragma warning(push)
+#pragma warning(disable: 4799)
+#endif
+
+void FilterYUVRows_MMX(uint8* dest,
+ const uint8* src0,
+ const uint8* src1,
+ int width,
+ int fraction) {
+ int pixel = 0;
+
+ // Process the unaligned bytes first.
+ int unaligned_width =
+ (8 - (reinterpret_cast<uintptr_t>(dest) & 7)) & 7;
+ while (pixel < width && pixel < unaligned_width) {
+ dest[pixel] = (src0[pixel] * (256 - fraction) +
+ src1[pixel] * fraction) >> 8;
+ ++pixel;
+ }
+
+ __m64 zero = _mm_setzero_si64();
+ __m64 src1_fraction = _mm_set1_pi16(fraction);
+ __m64 src0_fraction = _mm_set1_pi16(256 - fraction);
+ const __m64* src0_64 = reinterpret_cast<const __m64*>(src0 + pixel);
+ const __m64* src1_64 = reinterpret_cast<const __m64*>(src1 + pixel);
+ __m64* dest64 = reinterpret_cast<__m64*>(dest + pixel);
+ __m64* end64 = reinterpret_cast<__m64*>(
+ reinterpret_cast<uintptr_t>(dest + width) & ~7);
+
+ while (dest64 < end64) {
+ __m64 src0 = *src0_64++;
+ __m64 src1 = *src1_64++;
+ __m64 src2 = _mm_unpackhi_pi8(src0, zero);
+ __m64 src3 = _mm_unpackhi_pi8(src1, zero);
+ src0 = _mm_unpacklo_pi8(src0, zero);
+ src1 = _mm_unpacklo_pi8(src1, zero);
+ src0 = _mm_mullo_pi16(src0, src0_fraction);
+ src1 = _mm_mullo_pi16(src1, src1_fraction);
+ src2 = _mm_mullo_pi16(src2, src0_fraction);
+ src3 = _mm_mullo_pi16(src3, src1_fraction);
+ src0 = _mm_add_pi16(src0, src1);
+ src2 = _mm_add_pi16(src2, src3);
+ src0 = _mm_srli_pi16(src0, 8);
+ src2 = _mm_srli_pi16(src2, 8);
+ src0 = _mm_packs_pu16(src0, src2);
+ *dest64++ = src0;
+ pixel += 8;
+ }
+
+ while (pixel < width) {
+ dest[pixel] = (src0[pixel] * (256 - fraction) +
+ src1[pixel] * fraction) >> 8;
+ ++pixel;
+ }
+}
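+
+// Note: this function intentionally leaves the MMX state dirty; callers are
+// expected to invoke EmptyRegisterState() once per frame, per the warning
+// 4799 comment above.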
+
+#if defined(COMPILER_MSVC)
+#pragma warning(pop)
+#endif
+
+} // namespace media
diff --git a/chromium/media/base/simd/filter_yuv_sse2.cc b/chromium/media/base/simd/filter_yuv_sse2.cc
new file mode 100644
index 00000000000..84dba5ac8a8
--- /dev/null
+++ b/chromium/media/base/simd/filter_yuv_sse2.cc
@@ -0,0 +1,72 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if defined(_MSC_VER)
+#include <intrin.h>
+#else
+#include <mmintrin.h>
+#include <emmintrin.h>
+#endif
+
+#include "media/base/simd/filter_yuv.h"
+
+namespace media {
+
+void FilterYUVRows_SSE2(uint8* dest,
+ const uint8* src0,
+ const uint8* src1,
+ int width,
+ int fraction) {
+ int pixel = 0;
+
+ // Process the unaligned bytes first.
+ int unaligned_width =
+ (16 - (reinterpret_cast<uintptr_t>(dest) & 15)) & 15;
+ while (pixel < width && pixel < unaligned_width) {
+ dest[pixel] = (src0[pixel] * (256 - fraction) +
+ src1[pixel] * fraction) >> 8;
+ ++pixel;
+ }
+
+ __m128i zero = _mm_setzero_si128();
+ __m128i src1_fraction = _mm_set1_epi16(fraction);
+ __m128i src0_fraction = _mm_set1_epi16(256 - fraction);
+ const __m128i* src0_128 =
+ reinterpret_cast<const __m128i*>(src0 + pixel);
+ const __m128i* src1_128 =
+ reinterpret_cast<const __m128i*>(src1 + pixel);
+ __m128i* dest128 = reinterpret_cast<__m128i*>(dest + pixel);
+ __m128i* end128 = reinterpret_cast<__m128i*>(
+ reinterpret_cast<uintptr_t>(dest + width) & ~15);
+
+ while (dest128 < end128) {
+ __m128i src0 = _mm_loadu_si128(src0_128);
+ __m128i src1 = _mm_loadu_si128(src1_128);
+ __m128i src2 = _mm_unpackhi_epi8(src0, zero);
+ __m128i src3 = _mm_unpackhi_epi8(src1, zero);
+ src0 = _mm_unpacklo_epi8(src0, zero);
+ src1 = _mm_unpacklo_epi8(src1, zero);
+ src0 = _mm_mullo_epi16(src0, src0_fraction);
+ src1 = _mm_mullo_epi16(src1, src1_fraction);
+ src2 = _mm_mullo_epi16(src2, src0_fraction);
+ src3 = _mm_mullo_epi16(src3, src1_fraction);
+ src0 = _mm_add_epi16(src0, src1);
+ src2 = _mm_add_epi16(src2, src3);
+ src0 = _mm_srli_epi16(src0, 8);
+ src2 = _mm_srli_epi16(src2, 8);
+ src0 = _mm_packus_epi16(src0, src2);
+ *dest128++ = src0;
+ ++src0_128;
+ ++src1_128;
+ pixel += 16;
+ }
+
+ while (pixel < width) {
+ dest[pixel] = (src0[pixel] * (256 - fraction) +
+ src1[pixel] * fraction) >> 8;
+ ++pixel;
+ }
+}
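+
+// Only the destination is aligned by the scalar prologue above; the sources
+// are still read with _mm_loadu_si128 because destination alignment does not
+// imply source alignment, while stores can go through the aligned dest128
+// pointer.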
+
+} // namespace media
diff --git a/chromium/media/base/simd/linear_scale_yuv_to_rgb_mmx.asm b/chromium/media/base/simd/linear_scale_yuv_to_rgb_mmx.asm
new file mode 100644
index 00000000000..40418340e35
--- /dev/null
+++ b/chromium/media/base/simd/linear_scale_yuv_to_rgb_mmx.asm
@@ -0,0 +1,23 @@
+; Copyright (c) 2011 The Chromium Authors. All rights reserved.
+; Use of this source code is governed by a BSD-style license that can be
+; found in the LICENSE file.
+
+%include "third_party/x86inc/x86inc.asm"
+
+;
+; This file uses MMX instructions.
+;
+ SECTION_TEXT
+ CPU MMX
+
+; Use movq to save the output.
+%define MOVQ movq
+
+; void LinearScaleYUVToRGB32Row_MMX(const uint8* y_buf,
+; const uint8* u_buf,
+; const uint8* v_buf,
+; uint8* rgb_buf,
+; ptrdiff_t width,
+; ptrdiff_t source_dx);
+%define SYMBOL LinearScaleYUVToRGB32Row_MMX
+%include "linear_scale_yuv_to_rgb_mmx.inc"
diff --git a/chromium/media/base/simd/linear_scale_yuv_to_rgb_mmx.inc b/chromium/media/base/simd/linear_scale_yuv_to_rgb_mmx.inc
new file mode 100644
index 00000000000..493e9b3694d
--- /dev/null
+++ b/chromium/media/base/simd/linear_scale_yuv_to_rgb_mmx.inc
@@ -0,0 +1,168 @@
+; Copyright (c) 2011 The Chromium Authors. All rights reserved.
+; Use of this source code is governed by a BSD-style license that can be
+; found in the LICENSE file.
+
+%include "media/base/simd/media_export.asm"
+
+ EXPORT SYMBOL
+ align function_align
+
+mangle(SYMBOL):
+ %assign stack_offset 0
+
+ extern mangle(kCoefficientsRgbY)
+
+; Parameters are in the following order:
+; 1. Y plane
+; 2. U plane
+; 3. V plane
+; 4. ARGB frame
+; 5. Width
+; 6. Source dx
+
+PROLOGUE 6, 7, 3, Y, R0, R1, ARGB, R2, R3, TEMP
+
+%if gprsize == 8
+%define WORD_SIZE QWORD
+%else
+%define WORD_SIZE DWORD
+%endif
+
+; Define register aliases.
+%define Xq R1q ; Current X position
+%define COMPLq R2q ; Component A value
+%define COMPLd R2d ; Component A value
+%define U_ARG_REGq R0q ; U plane address argument
+%define V_ARG_REGq R1q ; V plane address argument
+%define SOURCE_DX_ARG_REGq R3q ; Source dx argument
+%define WIDTH_ARG_REGq R2q ; Width argument
+
+%ifdef PIC
+; PIC code shares COMPR, U, and V in the same register, so the code must be
+; careful not to mix them up. This frees R3q for the YUV table address.
+%define COMPRq R0q ; Component B value
+%define COMPRd R0d ; Component B value
+%define Uq R0q ; U plane address
+%define Vq R0q ; V plane address
+%define U_PLANE WORD_SIZE [rsp + 3 * gprsize]
+%define TABLE R3q ; Address of the table
+%else
+; Non-PIC code defines.
+%define COMPRq R3q ; Component B value
+%define COMPRd R3d ; Component B value
+%define Uq R0q ; U plane address
+%define Vq R3q ; V plane address
+%define TABLE mangle(kCoefficientsRgbY)
+%endif
+
+; Defines for stack variables. These are used in both PIC and non-PIC code.
+%define V_PLANE WORD_SIZE [rsp + 2 * gprsize]
+%define SOURCE_DX WORD_SIZE [rsp + gprsize]
+%define SOURCE_WIDTH WORD_SIZE [rsp]
+
+; Handle stack variables differently for PIC and non-PIC code.
+
+%ifdef PIC
+; Define stack usage for PIC code. PIC code pushes the U plane onto the stack.
+ PUSH U_ARG_REGq
+ PUSH V_ARG_REGq
+ PUSH SOURCE_DX_ARG_REGq
+ imul WIDTH_ARG_REGq, SOURCE_DX_ARG_REGq ; source_width = width * source_dx
+ PUSH WIDTH_ARG_REGq
+
+; Load the address of kCoefficientsRgbY into TABLE
+ mov TEMPq, SOURCE_DX_ARG_REGq ; Need to save source_dx first
+ LOAD_SYM TABLE, mangle(kCoefficientsRgbY)
+%define SOURCE_DX_ARG_REGq TEMPq ; Overwrite SOURCE_DX_ARG_REGq to TEMPq
+%else
+; Define stack usage. Non-PIC code just pushes 3 registers onto the stack.
+ PUSH V_ARG_REGq
+ PUSH SOURCE_DX_ARG_REGq
+ imul WIDTH_ARG_REGq, SOURCE_DX_ARG_REGq ; source_width = width * source_dx
+ PUSH WIDTH_ARG_REGq
+%endif
+
+%macro EPILOGUE 0
+%ifdef PIC
+ ADD rsp, 4 * gprsize
+%else
+ ADD rsp, 3 * gprsize
+%endif
+%endmacro
+
+ xor Xq, Xq ; x = 0
+ cmp SOURCE_DX_ARG_REGq, 0x20000
+ jl .lscaleend
+ mov Xq, 0x8000 ; x = 0.5 for 1/2 or less
+ jmp .lscaleend
+
+.lscaleloop:
+%ifdef PIC
+ mov Uq, U_PLANE ; PIC code saves U_PLANE on stack.
+%endif
+
+; Define macros for scaling YUV components since they are reused.
+%macro SCALEUV 1
+ mov TEMPq, Xq
+ sar TEMPq, 0x11
+ movzx COMPLd, BYTE [%1 + TEMPq]
+ movzx COMPRd, BYTE [%1 + TEMPq + 1]
+ mov TEMPq, Xq
+ and TEMPq, 0x1fffe
+ imul COMPRq, TEMPq
+ xor TEMPq, 0x1fffe
+ imul COMPLq, TEMPq
+ add COMPLq, COMPRq
+ shr COMPLq, 17
+%endmacro
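+
+; Note: the 0x11 (17-bit) shift in SCALEUV drops the 16 fraction bits plus
+; one more bit, sampling chroma at half the luma rate.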
+ SCALEUV Uq ; Use the above macro to scale U
+ movq mm0, [TABLE + 2048 + 8 * COMPLq]
+
+ mov Vq, V_PLANE ; Read V address from stack
+ SCALEUV Vq ; Use the above macro to scale V
+ paddsw mm0, [TABLE + 4096 + 8 * COMPLq]
+
+%macro SCALEY 0
+ mov TEMPq, Xq
+ sar TEMPq, 0x10
+ movzx COMPLd, BYTE [Yq + TEMPq]
+ movzx COMPRd, BYTE [Yq + TEMPq + 1]
+ mov TEMPq, Xq
+ add Xq, SOURCE_DX ; Add source_dx from stack
+ and TEMPq, 0xffff
+ imul COMPRq, TEMPq
+ xor TEMPq, 0xffff
+ imul COMPLq, TEMPq
+ add COMPLq, COMPRq
+ shr COMPLq, 16
+%endmacro
+ SCALEY ; Use the above macro to scale Y1
+ movq mm1, [TABLE + 8 * COMPLq]
+
+ cmp Xq, SOURCE_WIDTH ; Compare source_width from stack
+ jge .lscalelastpixel
+
+ SCALEY ; Use the above macro to scale Y2
+ movq mm2, [TABLE + 8 * COMPLq]
+
+ paddsw mm1, mm0
+ paddsw mm2, mm0
+ psraw mm1, 0x6
+ psraw mm2, 0x6
+ packuswb mm1, mm2
+ MOVQ [ARGBq], mm1
+ add ARGBq, 0x8
+
+.lscaleend:
+ cmp Xq, SOURCE_WIDTH ; Compare source_width from stack
+ jl .lscaleloop
+ EPILOGUE
+ RET
+
+.lscalelastpixel:
+ paddsw mm1, mm0
+ psraw mm1, 6
+ packuswb mm1, mm1
+ movd [ARGBq], mm1
+ EPILOGUE
+ RET
diff --git a/chromium/media/base/simd/linear_scale_yuv_to_rgb_mmx_x64.asm b/chromium/media/base/simd/linear_scale_yuv_to_rgb_mmx_x64.asm
new file mode 100644
index 00000000000..f7e1d908549
--- /dev/null
+++ b/chromium/media/base/simd/linear_scale_yuv_to_rgb_mmx_x64.asm
@@ -0,0 +1,149 @@
+; Copyright (c) 2011 The Chromium Authors. All rights reserved.
+; Use of this source code is governed by a BSD-style license that can be
+; found in the LICENSE file.
+
+%include "media/base/simd/media_export.asm"
+%include "third_party/x86inc/x86inc.asm"
+
+;
+; This file uses MMX instructions.
+;
+ SECTION_TEXT
+ CPU MMX
+
+;void LinearScaleYUVToRGB32Row_MMX_X64(const uint8* y_buf,
+; const uint8* u_buf,
+; const uint8* v_buf,
+; uint8* rgb_buf,
+; ptrdiff_t width,
+; ptrdiff_t source_dx);
+%define SYMBOL LinearScaleYUVToRGB32Row_MMX_X64
+ EXPORT SYMBOL
+ align function_align
+
+mangle(SYMBOL):
+ %assign stack_offset 0
+ extern mangle(kCoefficientsRgbY)
+
+; Parameters are in the following order:
+; 1. Y plane
+; 2. U plane
+; 3. V plane
+; 4. ARGB frame
+; 5. Width
+; 6. Source dx
+
+PROLOGUE 6, 7, 3, Y, U, V, ARGB, WIDTH, SOURCE_DX, COMPL
+
+%define TABLEq r10
+%define Xq r11
+%define INDEXq r12
+%define COMPRd r13d
+%define COMPRq r13
+%define FRACTIONq r14
+
+ PUSH TABLEq
+ PUSH Xq
+ PUSH INDEXq
+ PUSH COMPRq
+ PUSH FRACTIONq
+
+%macro EPILOGUE 0
+ POP FRACTIONq
+ POP COMPRq
+ POP INDEXq
+ POP Xq
+ POP TABLEq
+%endmacro
+
+ LOAD_SYM TABLEq, mangle(kCoefficientsRgbY)
+
+ imul WIDTHq, SOURCE_DXq ; source_width = width * source_dx
+ xor Xq, Xq ; x = 0
+ cmp SOURCE_DXq, 0x20000
+ jl .lscaleend
+ mov Xq, 0x8000 ; x = 0.5 for 1/2 or less
+ jmp .lscaleend
+
+.lscaleloop:
+ ; Interpolate U
+ mov INDEXq, Xq
+ sar INDEXq, 0x11
+ movzx COMPLd, BYTE [Uq + INDEXq]
+ movzx COMPRd, BYTE [Uq + INDEXq + 1]
+ mov FRACTIONq, Xq
+ and FRACTIONq, 0x1fffe
+ imul COMPRq, FRACTIONq
+ xor FRACTIONq, 0x1fffe
+ imul COMPLq, FRACTIONq
+ add COMPLq, COMPRq
+ shr COMPLq, 17
+ movq mm0, [TABLEq + 2048 + 8 * COMPLq]
+
+ ; Interpolate V
+ movzx COMPLd, BYTE [Vq + INDEXq]
+ movzx COMPRd, BYTE [Vq + INDEXq + 1]
+ ; The trick here is to imul COMPL first, then COMPR;
+ ; this saves two instructions. :)
+ imul COMPLq, FRACTIONq
+ xor FRACTIONq, 0x1fffe
+ imul COMPRq, FRACTIONq
+ add COMPLq, COMPRq
+ shr COMPLq, 17
+ paddsw mm0, [TABLEq + 4096 + 8 * COMPLq]
+
+ ; Interpolate first Y1.
+ lea INDEXq, [Xq + SOURCE_DXq] ; INDEXq now points to next pixel.
+ ; Xq points to current pixel.
+ mov FRACTIONq, Xq
+ sar Xq, 0x10
+ movzx COMPLd, BYTE [Yq + Xq]
+ movzx COMPRd, BYTE [Yq + Xq + 1]
+ and FRACTIONq, 0xffff
+ imul COMPRq, FRACTIONq
+ xor FRACTIONq, 0xffff
+ imul COMPLq, FRACTIONq
+ add COMPLq, COMPRq
+ shr COMPLq, 16
+ movq mm1, [TABLEq + 8 * COMPLq]
+
+ ; Interpolate Y2 if available.
+ cmp INDEXq, WIDTHq
+ jge .lscalelastpixel
+
+ lea Xq, [INDEXq + SOURCE_DXq] ; Xq points to next pixel.
+ ; INDEXq points to current pixel.
+ mov FRACTIONq, INDEXq
+ sar INDEXq, 0x10
+ movzx COMPLd, BYTE [Yq + INDEXq]
+ movzx COMPRd, BYTE [Yq + INDEXq + 1]
+ and FRACTIONq, 0xffff
+ imul COMPRq, FRACTIONq
+ xor FRACTIONq, 0xffff
+ imul COMPLq, FRACTIONq
+ add COMPLq, COMPRq
+ shr COMPLq, 16
+ movq mm2, [TABLEq + 8 * COMPLq]
+
+ paddsw mm1, mm0
+ paddsw mm2, mm0
+ psraw mm1, 0x6
+ psraw mm2, 0x6
+ packuswb mm1, mm2
+ movntq [ARGBq], mm1
+ add ARGBq, 0x8
+
+.lscaleend:
+ cmp Xq, WIDTHq
+ jl .lscaleloop
+ jmp .epilogue
+
+.lscalelastpixel:
+ paddsw mm1, mm0
+ psraw mm1, 6
+ packuswb mm1, mm1
+ movd [ARGBq], mm1
+
+.epilogue:
+ EPILOGUE
+ RET
diff --git a/chromium/media/base/simd/linear_scale_yuv_to_rgb_sse.asm b/chromium/media/base/simd/linear_scale_yuv_to_rgb_sse.asm
new file mode 100644
index 00000000000..0d6685f18c5
--- /dev/null
+++ b/chromium/media/base/simd/linear_scale_yuv_to_rgb_sse.asm
@@ -0,0 +1,23 @@
+; Copyright (c) 2011 The Chromium Authors. All rights reserved.
+; Use of this source code is governed by a BSD-style license that can be
+; found in the LICENSE file.
+
+%include "third_party/x86inc/x86inc.asm"
+
+;
+; This file uses MMX and SSE instructions.
+;
+ SECTION_TEXT
+ CPU MMX, SSE
+
+; Use movq to save the output.
+%define MOVQ movntq
+
+; void LinearScaleYUVToRGB32Row_SSE(const uint8* y_buf,
+; const uint8* u_buf,
+; const uint8* v_buf,
+; uint8* rgb_buf,
+; ptrdiff_t width,
+; ptrdiff_t source_dx);
+%define SYMBOL LinearScaleYUVToRGB32Row_SSE
+%include "linear_scale_yuv_to_rgb_mmx.inc"
diff --git a/chromium/media/base/simd/media_export.asm b/chromium/media/base/simd/media_export.asm
new file mode 100644
index 00000000000..e82be8d7f9d
--- /dev/null
+++ b/chromium/media/base/simd/media_export.asm
@@ -0,0 +1,48 @@
+; Copyright 2013 The Chromium Authors. All rights reserved.
+; Use of this source code is governed by a BSD-style license that can be
+; found in the LICENSE file.
+;
+; A set of helper macros for controlling symbol visibility.
+;
+
+%ifndef MEDIA_BASE_SIMD_MEDIA_EXPORT_ASM_
+%define MEDIA_BASE_SIMD_MEDIA_EXPORT_ASM_
+
+; Necessary for the mangle() macro.
+%include "third_party/x86inc/x86inc.asm"
+
+;
+; PRIVATE
+; A flag representing the specified symbol is a private symbol. This define adds
+; a hidden flag on Linux and a private_extern flag on Mac. (We can use this
+; private_extern flag only on the latest yasm.)
+;
+%ifdef MACHO
+%define PRIVATE :private_extern
+%elifdef ELF
+%define PRIVATE :hidden
+%else
+%define PRIVATE
+%endif
+
+;
+; EXPORT %1
+; Designates a symbol as PRIVATE if EXPORT_SYMBOLS is not set.
+;
+%macro EXPORT 1
+%ifdef EXPORT_SYMBOLS
+global mangle(%1)
+
+; Windows needs an additional export declaration.
+%ifidn __OUTPUT_FORMAT__,win32
+export mangle(%1)
+%elifidn __OUTPUT_FORMAT__,win64
+export mangle(%1)
+%endif
+
+%else
+global mangle(%1) PRIVATE
+%endif
+%endmacro
+
+%endif ; MEDIA_BASE_SIMD_MEDIA_EXPORT_ASM_
diff --git a/chromium/media/base/simd/scale_yuv_to_rgb_mmx.asm b/chromium/media/base/simd/scale_yuv_to_rgb_mmx.asm
new file mode 100644
index 00000000000..583b7cbb0e6
--- /dev/null
+++ b/chromium/media/base/simd/scale_yuv_to_rgb_mmx.asm
@@ -0,0 +1,23 @@
+; Copyright (c) 2011 The Chromium Authors. All rights reserved.
+; Use of this source code is governed by a BSD-style license that can be
+; found in the LICENSE file.
+
+%include "third_party/x86inc/x86inc.asm"
+
+;
+; This file uses MMX instructions.
+;
+ SECTION_TEXT
+ CPU MMX
+
+; Use movq to save the output.
+%define MOVQ movq
+
+; void ScaleYUVToRGB32Row_MMX(const uint8* y_buf,
+; const uint8* u_buf,
+; const uint8* v_buf,
+; uint8* rgb_buf,
+; ptrdiff_t width,
+; ptrdiff_t source_dx);
+%define SYMBOL ScaleYUVToRGB32Row_MMX
+%include "scale_yuv_to_rgb_mmx.inc"
diff --git a/chromium/media/base/simd/scale_yuv_to_rgb_mmx.inc b/chromium/media/base/simd/scale_yuv_to_rgb_mmx.inc
new file mode 100644
index 00000000000..2026390ed00
--- /dev/null
+++ b/chromium/media/base/simd/scale_yuv_to_rgb_mmx.inc
@@ -0,0 +1,117 @@
+; Copyright (c) 2011 The Chromium Authors. All rights reserved.
+; Use of this source code is governed by a BSD-style license that can be
+; found in the LICENSE file.
+
+%include "media/base/simd/media_export.asm"
+
+ EXPORT SYMBOL
+ align function_align
+
+mangle(SYMBOL):
+ %assign stack_offset 0
+
+ extern mangle(kCoefficientsRgbY)
+
+; Parameters are in the following order:
+; 1. Y plane
+; 2. U plane
+; 3. V plane
+; 4. ARGB frame
+; 5. Width
+; 6. Source dx
+
+PROLOGUE 6, 7, 3, Y, U, V, ARGB, R1, R2, TEMP
+
+%ifdef ARCH_X86_64
+%define WORD_SIZE QWORD
+%else
+%define WORD_SIZE DWORD
+%endif
+
+%ifdef PIC
+ PUSH R1q ; Width
+%endif
+ PUSH R2q ; Source dx
+
+%define SOURCE_DX WORD_SIZE [rsp]
+
+; PIC code.
+%ifdef PIC
+ LOAD_SYM R1q, mangle(kCoefficientsRgbY)
+%define WIDTH WORD_SIZE [rsp + gprsize]
+%define TABLE R1q
+%define Xq R2q
+
+; Non-PIC code.
+%else
+%define WIDTH R1q
+%define TABLE mangle(kCoefficientsRgbY)
+%define Xq R2q
+%endif
+
+ ; Set Xq index to 0.
+ xor Xq, Xq
+ jmp .scaleend
+
+.scaleloop:
+ ; TABLE can be either a register or a symbol depending on whether
+ ; this is PIC code or not.
+ mov TEMPq, Xq
+ sar TEMPq, 17
+ movzx TEMPd, BYTE [Uq + TEMPq]
+ movq mm0, [TABLE + 2048 + 8 * TEMPq]
+ mov TEMPq, Xq
+ sar TEMPq, 17
+ movzx TEMPd, BYTE [Vq + TEMPq]
+ paddsw mm0, [TABLE + 4096 + 8 * TEMPq]
+ mov TEMPq, Xq
+ add Xq, SOURCE_DX
+ sar TEMPq, 16
+ movzx TEMPd, BYTE [Yq + TEMPq]
+ movq mm1, [TABLE + 8 * TEMPq]
+ mov TEMPq, Xq
+ add Xq, SOURCE_DX
+ sar TEMPq, 16
+ movzx TEMPd, BYTE [Yq + TEMPq]
+ movq mm2, [TABLE + 8 * TEMPq]
+ paddsw mm1, mm0
+ paddsw mm2, mm0
+ psraw mm1, 6
+ psraw mm2, 6
+ packuswb mm1, mm2
+ MOVQ QWORD [ARGBq], mm1
+ add ARGBq, 8
+
+.scaleend:
+ ; WIDTH can be either a register or memory depending on whether
+ ; this is PIC code or not.
+ sub WIDTH, 2
+ jns .scaleloop
+
+ and WIDTH, 1 ; odd number of pixels?
+ jz .scaledone
+
+ mov TEMPq, Xq
+ sar TEMPq, 17
+ movzx TEMPd, BYTE [Uq + TEMPq]
+ movq mm0, [TABLE + 2048 + 8 * TEMPq]
+ mov TEMPq, Xq
+ sar TEMPq, 17
+ movzx TEMPd, BYTE [Vq + TEMPq]
+ paddsw mm0, [TABLE + 4096 + 8 * TEMPq]
+ mov TEMPq, Xq
+ sar TEMPq, 16
+ movzx TEMPd, BYTE [Yq + TEMPq]
+ movq mm1, [TABLE + 8 * TEMPq]
+ paddsw mm1, mm0
+ psraw mm1, 6
+ packuswb mm1, mm1
+ movd DWORD [ARGBq], mm1
+
+.scaledone:
+%ifdef PIC
+ ADD rsp, 2 * gprsize
+%else
+ ADD rsp, gprsize
+%endif
+ RET
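
As a reading aid, a scalar model of the loop above, assuming a hypothetical ConvertPixel() helper for the kCoefficientsRgbY lookup (a sketch of that lookup follows the table definition later in this diff). One difference: the asm reuses a single U/V sample for each pair of output pixels, while this model resamples chroma for every pixel.

    #include <cstdint>

    // Hypothetical; see the table sketch later in this diff.
    uint32_t ConvertPixel(uint8_t y, uint8_t u, uint8_t v);

    // Scalar model of ScaleYUVToRGB32Row: point sampling, no interpolation.
    // Luma is indexed with x >> 16; chroma is half resolution, so x >> 17.
    void ScaleRowModel(const uint8_t* y_buf, const uint8_t* u_buf,
                       const uint8_t* v_buf, uint32_t* rgb_buf,
                       int width, int source_dx) {
      uint32_t x = 0;
      for (int i = 0; i < width; ++i, x += source_dx)
        rgb_buf[i] =
            ConvertPixel(y_buf[x >> 16], u_buf[x >> 17], v_buf[x >> 17]);
    }
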
diff --git a/chromium/media/base/simd/scale_yuv_to_rgb_sse.asm b/chromium/media/base/simd/scale_yuv_to_rgb_sse.asm
new file mode 100644
index 00000000000..536ed18db07
--- /dev/null
+++ b/chromium/media/base/simd/scale_yuv_to_rgb_sse.asm
@@ -0,0 +1,23 @@
+; Copyright (c) 2011 The Chromium Authors. All rights reserved.
+; Use of this source code is governed by a BSD-style license that can be
+; found in the LICENSE file.
+
+%include "third_party/x86inc/x86inc.asm"
+
+;
+; This file uses MMX and SSE instructions.
+;
+ SECTION_TEXT
+ CPU MMX, SSE
+
+; Use movq to save the output.
+%define MOVQ movntq
+
+; void ScaleYUVToRGB32Row_SSE(const uint8* y_buf,
+; const uint8* u_buf,
+; const uint8* v_buf,
+; uint8* rgb_buf,
+; ptrdiff_t width,
+; ptrdiff_t source_dx);
+%define SYMBOL ScaleYUVToRGB32Row_SSE
+%include "scale_yuv_to_rgb_mmx.inc"
diff --git a/chromium/media/base/simd/scale_yuv_to_rgb_sse2_x64.asm b/chromium/media/base/simd/scale_yuv_to_rgb_sse2_x64.asm
new file mode 100644
index 00000000000..d6786875a31
--- /dev/null
+++ b/chromium/media/base/simd/scale_yuv_to_rgb_sse2_x64.asm
@@ -0,0 +1,110 @@
+; Copyright (c) 2011 The Chromium Authors. All rights reserved.
+; Use of this source code is governed by a BSD-style license that can be
+; found in the LICENSE file.
+
+%include "media/base/simd/media_export.asm"
+%include "third_party/x86inc/x86inc.asm"
+
+;
+; This file uses MMX and SSE2 instructions.
+;
+ SECTION_TEXT
+ CPU SSE2
+
+; void ScaleYUVToRGB32Row_SSE2_X64(const uint8* y_buf,
+; const uint8* u_buf,
+; const uint8* v_buf,
+; uint8* rgb_buf,
+; ptrdiff_t width,
+; ptrdiff_t source_dx);
+%define SYMBOL ScaleYUVToRGB32Row_SSE2_X64
+ EXPORT SYMBOL
+ align function_align
+
+mangle(SYMBOL):
+ %assign stack_offset 0
+ extern mangle(kCoefficientsRgbY)
+
+; Parameters are in the following order:
+; 1. Y plane
+; 2. U plane
+; 3. V plane
+; 4. ARGB frame
+; 5. Width
+; 6. Source dx
+
+PROLOGUE 6, 7, 3, Y, U, V, ARGB, WIDTH, SOURCE_DX, COMP
+
+%define TABLEq r10
+%define Xq r11
+%define INDEXq r12
+ PUSH r10
+ PUSH r11
+ PUSH r12
+
+ LOAD_SYM TABLEq, mangle(kCoefficientsRgbY)
+
+ ; Set Xq index to 0.
+ xor Xq, Xq
+ jmp .scaleend
+
+.scaleloop:
+ ; Read UV pixels.
+ mov INDEXq, Xq
+ sar INDEXq, 17
+ movzx COMPd, BYTE [Uq + INDEXq]
+ movq xmm0, [TABLEq + 2048 + 8 * COMPq]
+ movzx COMPd, BYTE [Vq + INDEXq]
+ movq xmm1, [TABLEq + 4096 + 8 * COMPq]
+
+ ; Read first Y pixel.
+ lea INDEXq, [Xq + SOURCE_DXq] ; INDEXq now points to the next pixel.
+ sar Xq, 16
+ movzx COMPd, BYTE [Yq + Xq]
+ paddsw xmm0, xmm1 ; Hide a ADD after memory load.
+ movq xmm1, [TABLEq + 8 * COMPq]
+
+ ; Read next Y pixel.
+ lea Xq, [INDEXq + SOURCE_DXq] ; Xq now points to next pixel.
+ sar INDEXq, 16
+ movzx COMPd, BYTE [Yq + INDEXq]
+ movq xmm2, [TABLEq + 8 * COMPq]
+ paddsw xmm1, xmm0
+ paddsw xmm2, xmm0
+ shufps xmm1, xmm2, 0x44 ; Join two pixels into one XMM register
+ psraw xmm1, 6
+ packuswb xmm1, xmm1
+ movq QWORD [ARGBq], xmm1
+ add ARGBq, 8
+
+.scaleend:
+ sub WIDTHq, 2
+ jns .scaleloop
+
+ and WIDTHq, 1 ; odd number of pixels?
+ jz .scaledone
+
+ ; Read U V components.
+ mov INDEXq, Xq
+ sar INDEXq, 17
+ movzx COMPd, BYTE [Uq + INDEXq]
+ movq xmm0, [TABLEq + 2048 + 8 * COMPq]
+ movzx COMPd, BYTE [Vq + INDEXq]
+ movq xmm1, [TABLEq + 4096 + 8 * COMPq]
+ paddsw xmm0, xmm1
+
+ ; Read one Y component.
+ mov INDEXq, Xq
+ sar INDEXq, 16
+ movzx COMPd, BYTE [Yq + INDEXq]
+ movq xmm1, [TABLEq + 8 * COMPq]
+ paddsw xmm1, xmm0
+ psraw xmm1, 6
+ packuswb xmm1, xmm1
+ movd DWORD [ARGBq], xmm1
+
+.scaledone:
+ POP r12
+ POP r11
+ POP r10
+ RET
diff --git a/chromium/media/base/simd/sinc_resampler_sse.cc b/chromium/media/base/simd/sinc_resampler_sse.cc
new file mode 100644
index 00000000000..f0aec1ce673
--- /dev/null
+++ b/chromium/media/base/simd/sinc_resampler_sse.cc
@@ -0,0 +1,48 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/sinc_resampler.h"
+
+#include <xmmintrin.h>
+
+namespace media {
+
+float SincResampler::Convolve_SSE(const float* input_ptr, const float* k1,
+ const float* k2,
+ double kernel_interpolation_factor) {
+ __m128 m_input;
+ __m128 m_sums1 = _mm_setzero_ps();
+ __m128 m_sums2 = _mm_setzero_ps();
+
+  // Depending on the alignment of |input_ptr|, we need to use loadu or load.
+  // Unrolling these loops hurts performance in local testing.
+ if (reinterpret_cast<uintptr_t>(input_ptr) & 0x0F) {
+ for (int i = 0; i < kKernelSize; i += 4) {
+ m_input = _mm_loadu_ps(input_ptr + i);
+ m_sums1 = _mm_add_ps(m_sums1, _mm_mul_ps(m_input, _mm_load_ps(k1 + i)));
+ m_sums2 = _mm_add_ps(m_sums2, _mm_mul_ps(m_input, _mm_load_ps(k2 + i)));
+ }
+ } else {
+ for (int i = 0; i < kKernelSize; i += 4) {
+ m_input = _mm_load_ps(input_ptr + i);
+ m_sums1 = _mm_add_ps(m_sums1, _mm_mul_ps(m_input, _mm_load_ps(k1 + i)));
+ m_sums2 = _mm_add_ps(m_sums2, _mm_mul_ps(m_input, _mm_load_ps(k2 + i)));
+ }
+ }
+
+ // Linearly interpolate the two "convolutions".
+ m_sums1 = _mm_mul_ps(m_sums1, _mm_set_ps1(1.0 - kernel_interpolation_factor));
+ m_sums2 = _mm_mul_ps(m_sums2, _mm_set_ps1(kernel_interpolation_factor));
+ m_sums1 = _mm_add_ps(m_sums1, m_sums2);
+
+ // Sum components together.
+ float result;
+ m_sums2 = _mm_add_ps(_mm_movehl_ps(m_sums1, m_sums1), m_sums1);
+ _mm_store_ss(&result, _mm_add_ss(m_sums2, _mm_shuffle_ps(
+ m_sums2, m_sums2, 1)));
+
+ return result;
+}
+
+} // namespace media
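
The horizontal reduction at the end of Convolve_SSE is compact but cryptic; this scalar sketch (illustrative only) shows what the movehl/add/shuffle sequence computes from the four lanes of m_sums1.

    // Scalar equivalent of the final reduction: _mm_movehl_ps folds lanes 2
    // and 3 onto lanes 0 and 1, then _mm_add_ss plus the shuffle adds the two
    // remaining partial sums.
    float HorizontalSumModel(const float lanes[4]) {
      const float low = lanes[0] + lanes[2];
      const float high = lanes[1] + lanes[3];
      return low + high;
    }
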
diff --git a/chromium/media/base/simd/vector_math_sse.cc b/chromium/media/base/simd/vector_math_sse.cc
new file mode 100644
index 00000000000..39bcaa0c195
--- /dev/null
+++ b/chromium/media/base/simd/vector_math_sse.cc
@@ -0,0 +1,39 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/vector_math_testing.h"
+
+#include <xmmintrin.h> // NOLINT
+
+namespace media {
+namespace vector_math {
+
+void FMUL_SSE(const float src[], float scale, int len, float dest[]) {
+ const int rem = len % 4;
+ const int last_index = len - rem;
+ __m128 m_scale = _mm_set_ps1(scale);
+ for (int i = 0; i < last_index; i += 4)
+ _mm_store_ps(dest + i, _mm_mul_ps(_mm_load_ps(src + i), m_scale));
+
+ // Handle any remaining values that wouldn't fit in an SSE pass.
+ for (int i = last_index; i < len; ++i)
+ dest[i] = src[i] * scale;
+}
+
+void FMAC_SSE(const float src[], float scale, int len, float dest[]) {
+ const int rem = len % 4;
+ const int last_index = len - rem;
+ __m128 m_scale = _mm_set_ps1(scale);
+ for (int i = 0; i < last_index; i += 4) {
+ _mm_store_ps(dest + i, _mm_add_ps(_mm_load_ps(dest + i),
+ _mm_mul_ps(_mm_load_ps(src + i), m_scale)));
+ }
+
+ // Handle any remaining values that wouldn't fit in an SSE pass.
+ for (int i = last_index; i < len; ++i)
+ dest[i] += src[i] * scale;
+}
+
+} // namespace vector_math
+} // namespace media
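
Both helpers use aligned SSE loads and stores on |src| and |dest|, so callers must supply 16-byte-aligned buffers; only the tail past the last multiple of four is handled with scalar code. A minimal usage sketch under that assumption (the wrapper function is illustrative):

    #include "base/memory/aligned_memory.h"
    #include "media/base/vector_math_testing.h"

    void ScaleAndAccumulate() {
      const int len = 1024;
      // 16-byte alignment is required by the _mm_load_ps/_mm_store_ps calls.
      float* src = static_cast<float*>(
          base::AlignedAlloc(sizeof(float) * len, 16));
      float* dest = static_cast<float*>(
          base::AlignedAlloc(sizeof(float) * len, 16));
      // ... fill |src| and zero |dest| ...
      media::vector_math::FMUL_SSE(src, 0.5f, len, dest);   // dest = src * 0.5
      media::vector_math::FMAC_SSE(src, 0.25f, len, dest);  // dest += src * 0.25
      base::AlignedFree(dest);
      base::AlignedFree(src);
    }
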
diff --git a/chromium/media/base/simd/xcode_hack.c b/chromium/media/base/simd/xcode_hack.c
new file mode 100644
index 00000000000..ee0b615d638
--- /dev/null
+++ b/chromium/media/base/simd/xcode_hack.c
@@ -0,0 +1,10 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Xcode doesn't want to link a pure assembly target and will fail
+// to link when it creates an empty file list. So add a dummy file to
+// keep the linker happy. See http://crbug.com/157073
+int xcode_sucks() {
+ return 0;
+}
diff --git a/chromium/media/base/simd/yuv_to_rgb_table.cc b/chromium/media/base/simd/yuv_to_rgb_table.cc
new file mode 100644
index 00000000000..00735655f5f
--- /dev/null
+++ b/chromium/media/base/simd/yuv_to_rgb_table.cc
@@ -0,0 +1,316 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/simd/yuv_to_rgb_table.h"
+
+extern "C" {
+
+// Defines the R,G,B,A contributions from Y.
+#define RGBY(i) { \
+ static_cast<int16>(1.164 * 64 * (i - 16) + 0.5), \
+ static_cast<int16>(1.164 * 64 * (i - 16) + 0.5), \
+ static_cast<int16>(1.164 * 64 * (i - 16) + 0.5), \
+ 0 \
+}
+
+// Defines the R,G,B,A contributions from U.
+// The contribution to A is the same for any value of U,
+// causing the final A value to be 255 in every conversion.
+#define RGBU(i) { \
+ static_cast<int16>(2.018 * 64 * (i - 128) + 0.5), \
+ static_cast<int16>(-0.391 * 64 * (i - 128) + 0.5), \
+ 0, \
+ static_cast<int16>(256 * 64 - 1) \
+}
+
+// Defines the R,G,B,A contributions from V.
+#define RGBV(i) { \
+ 0, \
+ static_cast<int16>(-0.813 * 64 * (i - 128) + 0.5), \
+ static_cast<int16>(1.596 * 64 * (i - 128) + 0.5), \
+ 0 \
+}
+
+// Used to define a set of multiplier words for each alpha level.
+#define ALPHA(i) { \
+ i, i, i, i \
+}
+
+// The following table defines the RGBA contributions
+// for each component of YUVA. The Y table comes first, followed
+// by the U and V tables; the alpha multiplier table follows.
+// These tables are aligned and kept adjacent to optimize for
+// SIMD and caching.
+
+SIMD_ALIGNED(const int16 kCoefficientsRgbY[256 * 4][4]) = {
+ RGBY(0x00), RGBY(0x01), RGBY(0x02), RGBY(0x03),
+ RGBY(0x04), RGBY(0x05), RGBY(0x06), RGBY(0x07),
+ RGBY(0x08), RGBY(0x09), RGBY(0x0A), RGBY(0x0B),
+ RGBY(0x0C), RGBY(0x0D), RGBY(0x0E), RGBY(0x0F),
+ RGBY(0x10), RGBY(0x11), RGBY(0x12), RGBY(0x13),
+ RGBY(0x14), RGBY(0x15), RGBY(0x16), RGBY(0x17),
+ RGBY(0x18), RGBY(0x19), RGBY(0x1A), RGBY(0x1B),
+ RGBY(0x1C), RGBY(0x1D), RGBY(0x1E), RGBY(0x1F),
+ RGBY(0x20), RGBY(0x21), RGBY(0x22), RGBY(0x23),
+ RGBY(0x24), RGBY(0x25), RGBY(0x26), RGBY(0x27),
+ RGBY(0x28), RGBY(0x29), RGBY(0x2A), RGBY(0x2B),
+ RGBY(0x2C), RGBY(0x2D), RGBY(0x2E), RGBY(0x2F),
+ RGBY(0x30), RGBY(0x31), RGBY(0x32), RGBY(0x33),
+ RGBY(0x34), RGBY(0x35), RGBY(0x36), RGBY(0x37),
+ RGBY(0x38), RGBY(0x39), RGBY(0x3A), RGBY(0x3B),
+ RGBY(0x3C), RGBY(0x3D), RGBY(0x3E), RGBY(0x3F),
+ RGBY(0x40), RGBY(0x41), RGBY(0x42), RGBY(0x43),
+ RGBY(0x44), RGBY(0x45), RGBY(0x46), RGBY(0x47),
+ RGBY(0x48), RGBY(0x49), RGBY(0x4A), RGBY(0x4B),
+ RGBY(0x4C), RGBY(0x4D), RGBY(0x4E), RGBY(0x4F),
+ RGBY(0x50), RGBY(0x51), RGBY(0x52), RGBY(0x53),
+ RGBY(0x54), RGBY(0x55), RGBY(0x56), RGBY(0x57),
+ RGBY(0x58), RGBY(0x59), RGBY(0x5A), RGBY(0x5B),
+ RGBY(0x5C), RGBY(0x5D), RGBY(0x5E), RGBY(0x5F),
+ RGBY(0x60), RGBY(0x61), RGBY(0x62), RGBY(0x63),
+ RGBY(0x64), RGBY(0x65), RGBY(0x66), RGBY(0x67),
+ RGBY(0x68), RGBY(0x69), RGBY(0x6A), RGBY(0x6B),
+ RGBY(0x6C), RGBY(0x6D), RGBY(0x6E), RGBY(0x6F),
+ RGBY(0x70), RGBY(0x71), RGBY(0x72), RGBY(0x73),
+ RGBY(0x74), RGBY(0x75), RGBY(0x76), RGBY(0x77),
+ RGBY(0x78), RGBY(0x79), RGBY(0x7A), RGBY(0x7B),
+ RGBY(0x7C), RGBY(0x7D), RGBY(0x7E), RGBY(0x7F),
+ RGBY(0x80), RGBY(0x81), RGBY(0x82), RGBY(0x83),
+ RGBY(0x84), RGBY(0x85), RGBY(0x86), RGBY(0x87),
+ RGBY(0x88), RGBY(0x89), RGBY(0x8A), RGBY(0x8B),
+ RGBY(0x8C), RGBY(0x8D), RGBY(0x8E), RGBY(0x8F),
+ RGBY(0x90), RGBY(0x91), RGBY(0x92), RGBY(0x93),
+ RGBY(0x94), RGBY(0x95), RGBY(0x96), RGBY(0x97),
+ RGBY(0x98), RGBY(0x99), RGBY(0x9A), RGBY(0x9B),
+ RGBY(0x9C), RGBY(0x9D), RGBY(0x9E), RGBY(0x9F),
+ RGBY(0xA0), RGBY(0xA1), RGBY(0xA2), RGBY(0xA3),
+ RGBY(0xA4), RGBY(0xA5), RGBY(0xA6), RGBY(0xA7),
+ RGBY(0xA8), RGBY(0xA9), RGBY(0xAA), RGBY(0xAB),
+ RGBY(0xAC), RGBY(0xAD), RGBY(0xAE), RGBY(0xAF),
+ RGBY(0xB0), RGBY(0xB1), RGBY(0xB2), RGBY(0xB3),
+ RGBY(0xB4), RGBY(0xB5), RGBY(0xB6), RGBY(0xB7),
+ RGBY(0xB8), RGBY(0xB9), RGBY(0xBA), RGBY(0xBB),
+ RGBY(0xBC), RGBY(0xBD), RGBY(0xBE), RGBY(0xBF),
+ RGBY(0xC0), RGBY(0xC1), RGBY(0xC2), RGBY(0xC3),
+ RGBY(0xC4), RGBY(0xC5), RGBY(0xC6), RGBY(0xC7),
+ RGBY(0xC8), RGBY(0xC9), RGBY(0xCA), RGBY(0xCB),
+ RGBY(0xCC), RGBY(0xCD), RGBY(0xCE), RGBY(0xCF),
+ RGBY(0xD0), RGBY(0xD1), RGBY(0xD2), RGBY(0xD3),
+ RGBY(0xD4), RGBY(0xD5), RGBY(0xD6), RGBY(0xD7),
+ RGBY(0xD8), RGBY(0xD9), RGBY(0xDA), RGBY(0xDB),
+ RGBY(0xDC), RGBY(0xDD), RGBY(0xDE), RGBY(0xDF),
+ RGBY(0xE0), RGBY(0xE1), RGBY(0xE2), RGBY(0xE3),
+ RGBY(0xE4), RGBY(0xE5), RGBY(0xE6), RGBY(0xE7),
+ RGBY(0xE8), RGBY(0xE9), RGBY(0xEA), RGBY(0xEB),
+ RGBY(0xEC), RGBY(0xED), RGBY(0xEE), RGBY(0xEF),
+ RGBY(0xF0), RGBY(0xF1), RGBY(0xF2), RGBY(0xF3),
+ RGBY(0xF4), RGBY(0xF5), RGBY(0xF6), RGBY(0xF7),
+ RGBY(0xF8), RGBY(0xF9), RGBY(0xFA), RGBY(0xFB),
+ RGBY(0xFC), RGBY(0xFD), RGBY(0xFE), RGBY(0xFF),
+
+ // Chroma U table.
+ RGBU(0x00), RGBU(0x01), RGBU(0x02), RGBU(0x03),
+ RGBU(0x04), RGBU(0x05), RGBU(0x06), RGBU(0x07),
+ RGBU(0x08), RGBU(0x09), RGBU(0x0A), RGBU(0x0B),
+ RGBU(0x0C), RGBU(0x0D), RGBU(0x0E), RGBU(0x0F),
+ RGBU(0x10), RGBU(0x11), RGBU(0x12), RGBU(0x13),
+ RGBU(0x14), RGBU(0x15), RGBU(0x16), RGBU(0x17),
+ RGBU(0x18), RGBU(0x19), RGBU(0x1A), RGBU(0x1B),
+ RGBU(0x1C), RGBU(0x1D), RGBU(0x1E), RGBU(0x1F),
+ RGBU(0x20), RGBU(0x21), RGBU(0x22), RGBU(0x23),
+ RGBU(0x24), RGBU(0x25), RGBU(0x26), RGBU(0x27),
+ RGBU(0x28), RGBU(0x29), RGBU(0x2A), RGBU(0x2B),
+ RGBU(0x2C), RGBU(0x2D), RGBU(0x2E), RGBU(0x2F),
+ RGBU(0x30), RGBU(0x31), RGBU(0x32), RGBU(0x33),
+ RGBU(0x34), RGBU(0x35), RGBU(0x36), RGBU(0x37),
+ RGBU(0x38), RGBU(0x39), RGBU(0x3A), RGBU(0x3B),
+ RGBU(0x3C), RGBU(0x3D), RGBU(0x3E), RGBU(0x3F),
+ RGBU(0x40), RGBU(0x41), RGBU(0x42), RGBU(0x43),
+ RGBU(0x44), RGBU(0x45), RGBU(0x46), RGBU(0x47),
+ RGBU(0x48), RGBU(0x49), RGBU(0x4A), RGBU(0x4B),
+ RGBU(0x4C), RGBU(0x4D), RGBU(0x4E), RGBU(0x4F),
+ RGBU(0x50), RGBU(0x51), RGBU(0x52), RGBU(0x53),
+ RGBU(0x54), RGBU(0x55), RGBU(0x56), RGBU(0x57),
+ RGBU(0x58), RGBU(0x59), RGBU(0x5A), RGBU(0x5B),
+ RGBU(0x5C), RGBU(0x5D), RGBU(0x5E), RGBU(0x5F),
+ RGBU(0x60), RGBU(0x61), RGBU(0x62), RGBU(0x63),
+ RGBU(0x64), RGBU(0x65), RGBU(0x66), RGBU(0x67),
+ RGBU(0x68), RGBU(0x69), RGBU(0x6A), RGBU(0x6B),
+ RGBU(0x6C), RGBU(0x6D), RGBU(0x6E), RGBU(0x6F),
+ RGBU(0x70), RGBU(0x71), RGBU(0x72), RGBU(0x73),
+ RGBU(0x74), RGBU(0x75), RGBU(0x76), RGBU(0x77),
+ RGBU(0x78), RGBU(0x79), RGBU(0x7A), RGBU(0x7B),
+ RGBU(0x7C), RGBU(0x7D), RGBU(0x7E), RGBU(0x7F),
+ RGBU(0x80), RGBU(0x81), RGBU(0x82), RGBU(0x83),
+ RGBU(0x84), RGBU(0x85), RGBU(0x86), RGBU(0x87),
+ RGBU(0x88), RGBU(0x89), RGBU(0x8A), RGBU(0x8B),
+ RGBU(0x8C), RGBU(0x8D), RGBU(0x8E), RGBU(0x8F),
+ RGBU(0x90), RGBU(0x91), RGBU(0x92), RGBU(0x93),
+ RGBU(0x94), RGBU(0x95), RGBU(0x96), RGBU(0x97),
+ RGBU(0x98), RGBU(0x99), RGBU(0x9A), RGBU(0x9B),
+ RGBU(0x9C), RGBU(0x9D), RGBU(0x9E), RGBU(0x9F),
+ RGBU(0xA0), RGBU(0xA1), RGBU(0xA2), RGBU(0xA3),
+ RGBU(0xA4), RGBU(0xA5), RGBU(0xA6), RGBU(0xA7),
+ RGBU(0xA8), RGBU(0xA9), RGBU(0xAA), RGBU(0xAB),
+ RGBU(0xAC), RGBU(0xAD), RGBU(0xAE), RGBU(0xAF),
+ RGBU(0xB0), RGBU(0xB1), RGBU(0xB2), RGBU(0xB3),
+ RGBU(0xB4), RGBU(0xB5), RGBU(0xB6), RGBU(0xB7),
+ RGBU(0xB8), RGBU(0xB9), RGBU(0xBA), RGBU(0xBB),
+ RGBU(0xBC), RGBU(0xBD), RGBU(0xBE), RGBU(0xBF),
+ RGBU(0xC0), RGBU(0xC1), RGBU(0xC2), RGBU(0xC3),
+ RGBU(0xC4), RGBU(0xC5), RGBU(0xC6), RGBU(0xC7),
+ RGBU(0xC8), RGBU(0xC9), RGBU(0xCA), RGBU(0xCB),
+ RGBU(0xCC), RGBU(0xCD), RGBU(0xCE), RGBU(0xCF),
+ RGBU(0xD0), RGBU(0xD1), RGBU(0xD2), RGBU(0xD3),
+ RGBU(0xD4), RGBU(0xD5), RGBU(0xD6), RGBU(0xD7),
+ RGBU(0xD8), RGBU(0xD9), RGBU(0xDA), RGBU(0xDB),
+ RGBU(0xDC), RGBU(0xDD), RGBU(0xDE), RGBU(0xDF),
+ RGBU(0xE0), RGBU(0xE1), RGBU(0xE2), RGBU(0xE3),
+ RGBU(0xE4), RGBU(0xE5), RGBU(0xE6), RGBU(0xE7),
+ RGBU(0xE8), RGBU(0xE9), RGBU(0xEA), RGBU(0xEB),
+ RGBU(0xEC), RGBU(0xED), RGBU(0xEE), RGBU(0xEF),
+ RGBU(0xF0), RGBU(0xF1), RGBU(0xF2), RGBU(0xF3),
+ RGBU(0xF4), RGBU(0xF5), RGBU(0xF6), RGBU(0xF7),
+ RGBU(0xF8), RGBU(0xF9), RGBU(0xFA), RGBU(0xFB),
+ RGBU(0xFC), RGBU(0xFD), RGBU(0xFE), RGBU(0xFF),
+
+ // Chroma V table.
+ RGBV(0x00), RGBV(0x01), RGBV(0x02), RGBV(0x03),
+ RGBV(0x04), RGBV(0x05), RGBV(0x06), RGBV(0x07),
+ RGBV(0x08), RGBV(0x09), RGBV(0x0A), RGBV(0x0B),
+ RGBV(0x0C), RGBV(0x0D), RGBV(0x0E), RGBV(0x0F),
+ RGBV(0x10), RGBV(0x11), RGBV(0x12), RGBV(0x13),
+ RGBV(0x14), RGBV(0x15), RGBV(0x16), RGBV(0x17),
+ RGBV(0x18), RGBV(0x19), RGBV(0x1A), RGBV(0x1B),
+ RGBV(0x1C), RGBV(0x1D), RGBV(0x1E), RGBV(0x1F),
+ RGBV(0x20), RGBV(0x21), RGBV(0x22), RGBV(0x23),
+ RGBV(0x24), RGBV(0x25), RGBV(0x26), RGBV(0x27),
+ RGBV(0x28), RGBV(0x29), RGBV(0x2A), RGBV(0x2B),
+ RGBV(0x2C), RGBV(0x2D), RGBV(0x2E), RGBV(0x2F),
+ RGBV(0x30), RGBV(0x31), RGBV(0x32), RGBV(0x33),
+ RGBV(0x34), RGBV(0x35), RGBV(0x36), RGBV(0x37),
+ RGBV(0x38), RGBV(0x39), RGBV(0x3A), RGBV(0x3B),
+ RGBV(0x3C), RGBV(0x3D), RGBV(0x3E), RGBV(0x3F),
+ RGBV(0x40), RGBV(0x41), RGBV(0x42), RGBV(0x43),
+ RGBV(0x44), RGBV(0x45), RGBV(0x46), RGBV(0x47),
+ RGBV(0x48), RGBV(0x49), RGBV(0x4A), RGBV(0x4B),
+ RGBV(0x4C), RGBV(0x4D), RGBV(0x4E), RGBV(0x4F),
+ RGBV(0x50), RGBV(0x51), RGBV(0x52), RGBV(0x53),
+ RGBV(0x54), RGBV(0x55), RGBV(0x56), RGBV(0x57),
+ RGBV(0x58), RGBV(0x59), RGBV(0x5A), RGBV(0x5B),
+ RGBV(0x5C), RGBV(0x5D), RGBV(0x5E), RGBV(0x5F),
+ RGBV(0x60), RGBV(0x61), RGBV(0x62), RGBV(0x63),
+ RGBV(0x64), RGBV(0x65), RGBV(0x66), RGBV(0x67),
+ RGBV(0x68), RGBV(0x69), RGBV(0x6A), RGBV(0x6B),
+ RGBV(0x6C), RGBV(0x6D), RGBV(0x6E), RGBV(0x6F),
+ RGBV(0x70), RGBV(0x71), RGBV(0x72), RGBV(0x73),
+ RGBV(0x74), RGBV(0x75), RGBV(0x76), RGBV(0x77),
+ RGBV(0x78), RGBV(0x79), RGBV(0x7A), RGBV(0x7B),
+ RGBV(0x7C), RGBV(0x7D), RGBV(0x7E), RGBV(0x7F),
+ RGBV(0x80), RGBV(0x81), RGBV(0x82), RGBV(0x83),
+ RGBV(0x84), RGBV(0x85), RGBV(0x86), RGBV(0x87),
+ RGBV(0x88), RGBV(0x89), RGBV(0x8A), RGBV(0x8B),
+ RGBV(0x8C), RGBV(0x8D), RGBV(0x8E), RGBV(0x8F),
+ RGBV(0x90), RGBV(0x91), RGBV(0x92), RGBV(0x93),
+ RGBV(0x94), RGBV(0x95), RGBV(0x96), RGBV(0x97),
+ RGBV(0x98), RGBV(0x99), RGBV(0x9A), RGBV(0x9B),
+ RGBV(0x9C), RGBV(0x9D), RGBV(0x9E), RGBV(0x9F),
+ RGBV(0xA0), RGBV(0xA1), RGBV(0xA2), RGBV(0xA3),
+ RGBV(0xA4), RGBV(0xA5), RGBV(0xA6), RGBV(0xA7),
+ RGBV(0xA8), RGBV(0xA9), RGBV(0xAA), RGBV(0xAB),
+ RGBV(0xAC), RGBV(0xAD), RGBV(0xAE), RGBV(0xAF),
+ RGBV(0xB0), RGBV(0xB1), RGBV(0xB2), RGBV(0xB3),
+ RGBV(0xB4), RGBV(0xB5), RGBV(0xB6), RGBV(0xB7),
+ RGBV(0xB8), RGBV(0xB9), RGBV(0xBA), RGBV(0xBB),
+ RGBV(0xBC), RGBV(0xBD), RGBV(0xBE), RGBV(0xBF),
+ RGBV(0xC0), RGBV(0xC1), RGBV(0xC2), RGBV(0xC3),
+ RGBV(0xC4), RGBV(0xC5), RGBV(0xC6), RGBV(0xC7),
+ RGBV(0xC8), RGBV(0xC9), RGBV(0xCA), RGBV(0xCB),
+ RGBV(0xCC), RGBV(0xCD), RGBV(0xCE), RGBV(0xCF),
+ RGBV(0xD0), RGBV(0xD1), RGBV(0xD2), RGBV(0xD3),
+ RGBV(0xD4), RGBV(0xD5), RGBV(0xD6), RGBV(0xD7),
+ RGBV(0xD8), RGBV(0xD9), RGBV(0xDA), RGBV(0xDB),
+ RGBV(0xDC), RGBV(0xDD), RGBV(0xDE), RGBV(0xDF),
+ RGBV(0xE0), RGBV(0xE1), RGBV(0xE2), RGBV(0xE3),
+ RGBV(0xE4), RGBV(0xE5), RGBV(0xE6), RGBV(0xE7),
+ RGBV(0xE8), RGBV(0xE9), RGBV(0xEA), RGBV(0xEB),
+ RGBV(0xEC), RGBV(0xED), RGBV(0xEE), RGBV(0xEF),
+ RGBV(0xF0), RGBV(0xF1), RGBV(0xF2), RGBV(0xF3),
+ RGBV(0xF4), RGBV(0xF5), RGBV(0xF6), RGBV(0xF7),
+ RGBV(0xF8), RGBV(0xF9), RGBV(0xFA), RGBV(0xFB),
+ RGBV(0xFC), RGBV(0xFD), RGBV(0xFE), RGBV(0xFF),
+
+ // Alpha multipliers for each alpha level.
+ ALPHA(0x00), ALPHA(0x01), ALPHA(0x02), ALPHA(0x03),
+ ALPHA(0x04), ALPHA(0x05), ALPHA(0x06), ALPHA(0x07),
+ ALPHA(0x08), ALPHA(0x09), ALPHA(0x0A), ALPHA(0x0B),
+ ALPHA(0x0C), ALPHA(0x0D), ALPHA(0x0E), ALPHA(0x0F),
+ ALPHA(0x10), ALPHA(0x11), ALPHA(0x12), ALPHA(0x13),
+ ALPHA(0x14), ALPHA(0x15), ALPHA(0x16), ALPHA(0x17),
+ ALPHA(0x18), ALPHA(0x19), ALPHA(0x1A), ALPHA(0x1B),
+ ALPHA(0x1C), ALPHA(0x1D), ALPHA(0x1E), ALPHA(0x1F),
+ ALPHA(0x20), ALPHA(0x21), ALPHA(0x22), ALPHA(0x23),
+ ALPHA(0x24), ALPHA(0x25), ALPHA(0x26), ALPHA(0x27),
+ ALPHA(0x28), ALPHA(0x29), ALPHA(0x2A), ALPHA(0x2B),
+ ALPHA(0x2C), ALPHA(0x2D), ALPHA(0x2E), ALPHA(0x2F),
+ ALPHA(0x30), ALPHA(0x31), ALPHA(0x32), ALPHA(0x33),
+ ALPHA(0x34), ALPHA(0x35), ALPHA(0x36), ALPHA(0x37),
+ ALPHA(0x38), ALPHA(0x39), ALPHA(0x3A), ALPHA(0x3B),
+ ALPHA(0x3C), ALPHA(0x3D), ALPHA(0x3E), ALPHA(0x3F),
+ ALPHA(0x40), ALPHA(0x41), ALPHA(0x42), ALPHA(0x43),
+ ALPHA(0x44), ALPHA(0x45), ALPHA(0x46), ALPHA(0x47),
+ ALPHA(0x48), ALPHA(0x49), ALPHA(0x4A), ALPHA(0x4B),
+ ALPHA(0x4C), ALPHA(0x4D), ALPHA(0x4E), ALPHA(0x4F),
+ ALPHA(0x50), ALPHA(0x51), ALPHA(0x52), ALPHA(0x53),
+ ALPHA(0x54), ALPHA(0x55), ALPHA(0x56), ALPHA(0x57),
+ ALPHA(0x58), ALPHA(0x59), ALPHA(0x5A), ALPHA(0x5B),
+ ALPHA(0x5C), ALPHA(0x5D), ALPHA(0x5E), ALPHA(0x5F),
+ ALPHA(0x60), ALPHA(0x61), ALPHA(0x62), ALPHA(0x63),
+ ALPHA(0x64), ALPHA(0x65), ALPHA(0x66), ALPHA(0x67),
+ ALPHA(0x68), ALPHA(0x69), ALPHA(0x6A), ALPHA(0x6B),
+ ALPHA(0x6C), ALPHA(0x6D), ALPHA(0x6E), ALPHA(0x6F),
+ ALPHA(0x70), ALPHA(0x71), ALPHA(0x72), ALPHA(0x73),
+ ALPHA(0x74), ALPHA(0x75), ALPHA(0x76), ALPHA(0x77),
+ ALPHA(0x78), ALPHA(0x79), ALPHA(0x7A), ALPHA(0x7B),
+ ALPHA(0x7C), ALPHA(0x7D), ALPHA(0x7E), ALPHA(0x7F),
+ ALPHA(0x80), ALPHA(0x81), ALPHA(0x82), ALPHA(0x83),
+ ALPHA(0x84), ALPHA(0x85), ALPHA(0x86), ALPHA(0x87),
+ ALPHA(0x88), ALPHA(0x89), ALPHA(0x8A), ALPHA(0x8B),
+ ALPHA(0x8C), ALPHA(0x8D), ALPHA(0x8E), ALPHA(0x8F),
+ ALPHA(0x90), ALPHA(0x91), ALPHA(0x92), ALPHA(0x93),
+ ALPHA(0x94), ALPHA(0x95), ALPHA(0x96), ALPHA(0x97),
+ ALPHA(0x98), ALPHA(0x99), ALPHA(0x9A), ALPHA(0x9B),
+ ALPHA(0x9C), ALPHA(0x9D), ALPHA(0x9E), ALPHA(0x9F),
+ ALPHA(0xA0), ALPHA(0xA1), ALPHA(0xA2), ALPHA(0xA3),
+ ALPHA(0xA4), ALPHA(0xA5), ALPHA(0xA6), ALPHA(0xA7),
+ ALPHA(0xA8), ALPHA(0xA9), ALPHA(0xAA), ALPHA(0xAB),
+ ALPHA(0xAC), ALPHA(0xAD), ALPHA(0xAE), ALPHA(0xAF),
+ ALPHA(0xB0), ALPHA(0xB1), ALPHA(0xB2), ALPHA(0xB3),
+ ALPHA(0xB4), ALPHA(0xB5), ALPHA(0xB6), ALPHA(0xB7),
+ ALPHA(0xB8), ALPHA(0xB9), ALPHA(0xBA), ALPHA(0xBB),
+ ALPHA(0xBC), ALPHA(0xBD), ALPHA(0xBE), ALPHA(0xBF),
+ ALPHA(0xC0), ALPHA(0xC1), ALPHA(0xC2), ALPHA(0xC3),
+ ALPHA(0xC4), ALPHA(0xC5), ALPHA(0xC6), ALPHA(0xC7),
+ ALPHA(0xC8), ALPHA(0xC9), ALPHA(0xCA), ALPHA(0xCB),
+ ALPHA(0xCC), ALPHA(0xCD), ALPHA(0xCE), ALPHA(0xCF),
+ ALPHA(0xD0), ALPHA(0xD1), ALPHA(0xD2), ALPHA(0xD3),
+ ALPHA(0xD4), ALPHA(0xD5), ALPHA(0xD6), ALPHA(0xD7),
+ ALPHA(0xD8), ALPHA(0xD9), ALPHA(0xDA), ALPHA(0xDB),
+ ALPHA(0xDC), ALPHA(0xDD), ALPHA(0xDE), ALPHA(0xDF),
+ ALPHA(0xE0), ALPHA(0xE1), ALPHA(0xE2), ALPHA(0xE3),
+ ALPHA(0xE4), ALPHA(0xE5), ALPHA(0xE6), ALPHA(0xE7),
+ ALPHA(0xE8), ALPHA(0xE9), ALPHA(0xEA), ALPHA(0xEB),
+ ALPHA(0xEC), ALPHA(0xED), ALPHA(0xEE), ALPHA(0xEF),
+ ALPHA(0xF0), ALPHA(0xF1), ALPHA(0xF2), ALPHA(0xF3),
+ ALPHA(0xF4), ALPHA(0xF5), ALPHA(0xF6), ALPHA(0xF7),
+ ALPHA(0xF8), ALPHA(0xF9), ALPHA(0xFA), ALPHA(0xFB),
+ ALPHA(0xFC), ALPHA(0xFD), ALPHA(0xFE), ALPHA(0xFF),
+};
+
+#undef RGBY
+#undef RGBU
+#undef RGBV
+#undef ALPHA
+
+} // extern "C"
diff --git a/chromium/media/base/simd/yuv_to_rgb_table.h b/chromium/media/base/simd/yuv_to_rgb_table.h
new file mode 100644
index 00000000000..aebf1b20d19
--- /dev/null
+++ b/chromium/media/base/simd/yuv_to_rgb_table.h
@@ -0,0 +1,26 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Defines the conversion table from YUV to RGB.
+
+#ifndef MEDIA_BASE_SIMD_YUV_TO_RGB_TABLE_H_
+#define MEDIA_BASE_SIMD_YUV_TO_RGB_TABLE_H_
+
+#include "base/basictypes.h"
+#include "build/build_config.h"
+
+extern "C" {
+
+#if defined(COMPILER_MSVC)
+#define SIMD_ALIGNED(var) __declspec(align(16)) var
+#else
+#define SIMD_ALIGNED(var) var __attribute__((aligned(16)))
+#endif
+
+// Align the table to 16 bytes to allow faster reading.
+extern SIMD_ALIGNED(const int16 kCoefficientsRgbY[256 * 4][4]);
+
+} // extern "C"
+
+#endif // MEDIA_BASE_SIMD_YUV_TO_RGB_TABLE_H_
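
For reference, a scalar sketch of how the row functions consume this table (the helper name is illustrative; the 2.018 and 1.596 coefficients above identify the first and third lanes as blue and red, so entries are laid out {B, G, R, A} in memory and read back as little-endian ARGB). This mirrors the MMX sequence movq / paddsw / psraw 6 / packuswb, ignoring the int16 saturation of paddsw, which a plain int cannot overflow here.

    #include <algorithm>
    #include <cstdint>

    #include "media/base/simd/yuv_to_rgb_table.h"

    // Hypothetical scalar equivalent of the per-pixel MMX sequence:
    //   movq   mm0, [table + 2048 + 8 * u]  ; U contribution (entry 256 + u)
    //   paddsw mm0, [table + 4096 + 8 * v]  ; + V contribution (entry 512 + v)
    //   paddsw mm0, [table + 8 * y]         ; + Y contribution
    //   psraw  mm0, 6 / packuswb            ; >> 6, clamp each lane to 0..255
    uint32_t ConvertPixel(uint8_t y, uint8_t u, uint8_t v) {
      uint32_t out = 0;
      for (int c = 0; c < 4; ++c) {
        int value = kCoefficientsRgbY[y][c] +
                    kCoefficientsRgbY[256 + u][c] +
                    kCoefficientsRgbY[512 + v][c];
        value = std::min(std::max(value >> 6, 0), 255);  // psraw 6 + packuswb
        out |= static_cast<uint32_t>(value) << (8 * c);
      }
      return out;
    }
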
diff --git a/chromium/media/base/sinc_resampler.cc b/chromium/media/base/sinc_resampler.cc
new file mode 100644
index 00000000000..a2918c3f0d2
--- /dev/null
+++ b/chromium/media/base/sinc_resampler.cc
@@ -0,0 +1,391 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Initial input buffer layout, dividing into regions r0_ to r4_ (note: r0_, r3_
+// and r4_ will move after the first load):
+//
+// |----------------|-----------------------------------------|----------------|
+//
+// request_frames_
+// <--------------------------------------------------------->
+// r0_ (during first load)
+//
+// kKernelSize / 2 kKernelSize / 2 kKernelSize / 2 kKernelSize / 2
+// <---------------> <---------------> <---------------> <--------------->
+// r1_ r2_ r3_ r4_
+//
+// block_size_ == r4_ - r2_
+// <--------------------------------------->
+//
+// request_frames_
+// <------------------ ... ----------------->
+// r0_ (during second load)
+//
+// On the second request r0_ slides to the right by kKernelSize / 2 and r3_, r4_
+// and block_size_ are reinitialized via step (3) in the algorithm below.
+//
+// These new regions remain constant until a Flush() occurs. While complicated,
+// this allows us to reduce jitter by always requesting the same amount from the
+// provided callback.
+//
+// The algorithm:
+//
+// 1) Allocate input_buffer of size: request_frames_ + kKernelSize; this ensures
+// there's enough room to read request_frames_ from the callback into region
+// r0_ (which will move between the first and subsequent passes).
+//
+// 2) Let r1_, r2_ each represent half the kernel centered around r0_:
+//
+// r0_ = input_buffer_ + kKernelSize / 2
+// r1_ = input_buffer_
+// r2_ = r0_
+//
+// r0_ is always request_frames_ in size. r1_, r2_ are kKernelSize / 2 in
+// size. r1_ must be zero initialized to avoid convolution with garbage (see
+// step (5) for why).
+//
+// 3) Let r3_, r4_ each represent half the kernel right aligned with the end of
+// r0_ and choose block_size_ as the distance in frames between r4_ and r2_:
+//
+// r3_ = r0_ + request_frames_ - kKernelSize
+// r4_ = r0_ + request_frames_ - kKernelSize / 2
+// block_size_ = r4_ - r2_ = request_frames_ - kKernelSize / 2
+//
+// 4) Consume request_frames_ frames into r0_.
+//
+// 5) Position kernel centered at start of r2_ and generate output frames until
+// the kernel is centered at the start of r4_ or we've finished generating
+// all the output frames.
+//
+// 6) Wrap leftover data from r3_ to r1_ and from r4_ to r2_.
+//
+// 7) If we're on the second load, in order to avoid overwriting the frames we
+// just wrapped from r4_ we need to slide r0_ to the right by the size of
+// r4_, which is kKernelSize / 2:
+//
+// r0_ = r0_ + kKernelSize / 2 = input_buffer_ + kKernelSize
+//
+// r3_, r4_, and block_size_ then need to be reinitialized, so goto (3).
+//
+// 8) Else, if we're not on the second load, goto (4).
+//
+// Note: we're glossing over how the sub-sample handling works with
+// |virtual_source_idx_|, etc.
+
+// MSVC++ requires this to be set before any other includes to get M_PI.
+#define _USE_MATH_DEFINES
+
+#include "media/base/sinc_resampler.h"
+
+#include <cmath>
+#include <limits>
+
+#include "base/cpu.h"
+#include "base/logging.h"
+
+#if defined(ARCH_CPU_ARM_FAMILY) && defined(USE_NEON)
+#include <arm_neon.h>
+#endif
+
+namespace media {
+
+static double SincScaleFactor(double io_ratio) {
+ // |sinc_scale_factor| is basically the normalized cutoff frequency of the
+ // low-pass filter.
+ double sinc_scale_factor = io_ratio > 1.0 ? 1.0 / io_ratio : 1.0;
+
+ // The sinc function is an idealized brick-wall filter, but since we're
+ // windowing it the transition from pass to stop does not happen right away.
+ // So we should adjust the low pass filter cutoff slightly downward to avoid
+ // some aliasing at the very high-end.
+ // TODO(crogers): this value is empirical and to be more exact should vary
+ // depending on kKernelSize.
+ sinc_scale_factor *= 0.9;
+
+ return sinc_scale_factor;
+}
+
+// If we know the minimum architecture at compile time, avoid CPU detection.
+// Force NaCl code to use C routines since (at present) nothing there uses these
+// methods and plumbing the -msse built library is non-trivial. iOS lies
+// about its architecture, so we also need to exclude it here.
+#if defined(ARCH_CPU_X86_FAMILY) && !defined(OS_NACL) && !defined(OS_IOS)
+#if defined(__SSE__)
+#define CONVOLVE_FUNC Convolve_SSE
+void SincResampler::InitializeCPUSpecificFeatures() {}
+#else
+// X86 CPU detection required. Functions will be set by
+// InitializeCPUSpecificFeatures().
+// TODO(dalecurtis): Once Chrome moves to an SSE baseline this can be removed.
+#define CONVOLVE_FUNC g_convolve_proc_
+
+typedef float (*ConvolveProc)(const float*, const float*, const float*, double);
+static ConvolveProc g_convolve_proc_ = NULL;
+
+void SincResampler::InitializeCPUSpecificFeatures() {
+ CHECK(!g_convolve_proc_);
+ g_convolve_proc_ = base::CPU().has_sse() ? Convolve_SSE : Convolve_C;
+}
+#endif
+#elif defined(ARCH_CPU_ARM_FAMILY) && defined(USE_NEON)
+#define CONVOLVE_FUNC Convolve_NEON
+void SincResampler::InitializeCPUSpecificFeatures() {}
+#else
+// Unknown architecture.
+#define CONVOLVE_FUNC Convolve_C
+void SincResampler::InitializeCPUSpecificFeatures() {}
+#endif
+
+SincResampler::SincResampler(double io_sample_rate_ratio,
+ int request_frames,
+ const ReadCB& read_cb)
+ : io_sample_rate_ratio_(io_sample_rate_ratio),
+ read_cb_(read_cb),
+ request_frames_(request_frames),
+ input_buffer_size_(request_frames_ + kKernelSize),
+ // Create input buffers with a 16-byte alignment for SSE optimizations.
+ kernel_storage_(static_cast<float*>(
+ base::AlignedAlloc(sizeof(float) * kKernelStorageSize, 16))),
+ kernel_pre_sinc_storage_(static_cast<float*>(
+ base::AlignedAlloc(sizeof(float) * kKernelStorageSize, 16))),
+ kernel_window_storage_(static_cast<float*>(
+ base::AlignedAlloc(sizeof(float) * kKernelStorageSize, 16))),
+ input_buffer_(static_cast<float*>(
+ base::AlignedAlloc(sizeof(float) * input_buffer_size_, 16))),
+ r1_(input_buffer_.get()),
+ r2_(input_buffer_.get() + kKernelSize / 2) {
+ CHECK_GT(request_frames_, 0);
+ Flush();
+ CHECK_GT(block_size_, kKernelSize)
+ << "block_size must be greater than kKernelSize!";
+
+ memset(kernel_storage_.get(), 0,
+ sizeof(*kernel_storage_.get()) * kKernelStorageSize);
+ memset(kernel_pre_sinc_storage_.get(), 0,
+ sizeof(*kernel_pre_sinc_storage_.get()) * kKernelStorageSize);
+ memset(kernel_window_storage_.get(), 0,
+ sizeof(*kernel_window_storage_.get()) * kKernelStorageSize);
+
+ InitializeKernel();
+}
+
+SincResampler::~SincResampler() {}
+
+void SincResampler::UpdateRegions(bool second_load) {
+ // Setup various region pointers in the buffer (see diagram above). If we're
+ // on the second load we need to slide r0_ to the right by kKernelSize / 2.
+ r0_ = input_buffer_.get() + (second_load ? kKernelSize : kKernelSize / 2);
+ r3_ = r0_ + request_frames_ - kKernelSize;
+ r4_ = r0_ + request_frames_ - kKernelSize / 2;
+ block_size_ = r4_ - r2_;
+
+ // r1_ at the beginning of the buffer.
+ CHECK_EQ(r1_, input_buffer_.get());
+ // r1_ left of r2_, r4_ left of r3_ and size correct.
+ CHECK_EQ(r2_ - r1_, r4_ - r3_);
+ // r2_ left of r3.
+ CHECK_LT(r2_, r3_);
+}
+
+void SincResampler::InitializeKernel() {
+ // Blackman window parameters.
+ static const double kAlpha = 0.16;
+ static const double kA0 = 0.5 * (1.0 - kAlpha);
+ static const double kA1 = 0.5;
+ static const double kA2 = 0.5 * kAlpha;
+
+ // Generates a set of windowed sinc() kernels.
+ // We generate a range of sub-sample offsets from 0.0 to 1.0.
+ const double sinc_scale_factor = SincScaleFactor(io_sample_rate_ratio_);
+ for (int offset_idx = 0; offset_idx <= kKernelOffsetCount; ++offset_idx) {
+ const float subsample_offset =
+ static_cast<float>(offset_idx) / kKernelOffsetCount;
+
+ for (int i = 0; i < kKernelSize; ++i) {
+ const int idx = i + offset_idx * kKernelSize;
+ const float pre_sinc = M_PI * (i - kKernelSize / 2 - subsample_offset);
+ kernel_pre_sinc_storage_[idx] = pre_sinc;
+
+ // Compute Blackman window, matching the offset of the sinc().
+ const float x = (i - subsample_offset) / kKernelSize;
+ const float window = kA0 - kA1 * cos(2.0 * M_PI * x) + kA2
+ * cos(4.0 * M_PI * x);
+ kernel_window_storage_[idx] = window;
+
+ // Compute the sinc with offset, then window the sinc() function and store
+ // at the correct offset.
+ if (pre_sinc == 0) {
+ kernel_storage_[idx] = sinc_scale_factor * window;
+ } else {
+ kernel_storage_[idx] =
+ window * sin(sinc_scale_factor * pre_sinc) / pre_sinc;
+ }
+ }
+ }
+}
+
+void SincResampler::SetRatio(double io_sample_rate_ratio) {
+ if (fabs(io_sample_rate_ratio_ - io_sample_rate_ratio) <
+ std::numeric_limits<double>::epsilon()) {
+ return;
+ }
+
+ io_sample_rate_ratio_ = io_sample_rate_ratio;
+
+ // Optimize reinitialization by reusing values which are independent of
+ // |sinc_scale_factor|. Provides a 3x speedup.
+ const double sinc_scale_factor = SincScaleFactor(io_sample_rate_ratio_);
+ for (int offset_idx = 0; offset_idx <= kKernelOffsetCount; ++offset_idx) {
+ for (int i = 0; i < kKernelSize; ++i) {
+ const int idx = i + offset_idx * kKernelSize;
+ const float window = kernel_window_storage_[idx];
+ const float pre_sinc = kernel_pre_sinc_storage_[idx];
+
+ if (pre_sinc == 0) {
+ kernel_storage_[idx] = sinc_scale_factor * window;
+ } else {
+ kernel_storage_[idx] =
+ window * sin(sinc_scale_factor * pre_sinc) / pre_sinc;
+ }
+ }
+ }
+}
+
+void SincResampler::Resample(int frames, float* destination) {
+ int remaining_frames = frames;
+
+ // Step (1) -- Prime the input buffer at the start of the input stream.
+ if (!buffer_primed_ && remaining_frames) {
+ read_cb_.Run(request_frames_, r0_);
+ buffer_primed_ = true;
+ }
+
+  // Step (2) -- Resample! Make const copies of what we can outside the loop
+  // for speed; this measurably affects ARM performance. See the inner loop
+  // comment below.
+ const double current_io_ratio = io_sample_rate_ratio_;
+ const float* const kernel_ptr = kernel_storage_.get();
+ while (remaining_frames) {
+ // |i| may be negative if the last Resample() call ended on an iteration
+ // that put |virtual_source_idx_| over the limit.
+ //
+ // Note: The loop construct here can severely impact performance on ARM
+ // or when built with clang. See https://codereview.chromium.org/18566009/
+ for (int i = ceil((block_size_ - virtual_source_idx_) / current_io_ratio);
+ i > 0; --i) {
+ DCHECK_LT(virtual_source_idx_, block_size_);
+
+ // |virtual_source_idx_| lies in between two kernel offsets so figure out
+ // what they are.
+ const int source_idx = virtual_source_idx_;
+ const double subsample_remainder = virtual_source_idx_ - source_idx;
+
+ const double virtual_offset_idx =
+ subsample_remainder * kKernelOffsetCount;
+ const int offset_idx = virtual_offset_idx;
+
+ // We'll compute "convolutions" for the two kernels which straddle
+ // |virtual_source_idx_|.
+ const float* const k1 = kernel_ptr + offset_idx * kKernelSize;
+ const float* const k2 = k1 + kKernelSize;
+
+ // Ensure |k1|, |k2| are 16-byte aligned for SIMD usage. Should always be
+ // true so long as kKernelSize is a multiple of 16.
+ DCHECK_EQ(0u, reinterpret_cast<uintptr_t>(k1) & 0x0F);
+ DCHECK_EQ(0u, reinterpret_cast<uintptr_t>(k2) & 0x0F);
+
+ // Initialize input pointer based on quantized |virtual_source_idx_|.
+ const float* const input_ptr = r1_ + source_idx;
+
+ // Figure out how much to weight each kernel's "convolution".
+ const double kernel_interpolation_factor =
+ virtual_offset_idx - offset_idx;
+ *destination++ = CONVOLVE_FUNC(
+ input_ptr, k1, k2, kernel_interpolation_factor);
+
+ // Advance the virtual index.
+ virtual_source_idx_ += current_io_ratio;
+
+ if (!--remaining_frames)
+ return;
+ }
+
+ // Wrap back around to the start.
+ virtual_source_idx_ -= block_size_;
+
+ // Step (3) -- Copy r3_, r4_ to r1_, r2_.
+ // This wraps the last input frames back to the start of the buffer.
+ memcpy(r1_, r3_, sizeof(*input_buffer_.get()) * kKernelSize);
+
+ // Step (4) -- Reinitialize regions if necessary.
+ if (r0_ == r2_)
+ UpdateRegions(true);
+
+ // Step (5) -- Refresh the buffer with more input.
+ read_cb_.Run(request_frames_, r0_);
+ }
+}
+
+#undef CONVOLVE_FUNC
+
+int SincResampler::ChunkSize() const {
+ return block_size_ / io_sample_rate_ratio_;
+}
+
+void SincResampler::Flush() {
+ virtual_source_idx_ = 0;
+ buffer_primed_ = false;
+ memset(input_buffer_.get(), 0,
+ sizeof(*input_buffer_.get()) * input_buffer_size_);
+ UpdateRegions(false);
+}
+
+float SincResampler::Convolve_C(const float* input_ptr, const float* k1,
+ const float* k2,
+ double kernel_interpolation_factor) {
+ float sum1 = 0;
+ float sum2 = 0;
+
+ // Generate a single output sample. Unrolling this loop hurt performance in
+ // local testing.
+ int n = kKernelSize;
+ while (n--) {
+ sum1 += *input_ptr * *k1++;
+ sum2 += *input_ptr++ * *k2++;
+ }
+
+ // Linearly interpolate the two "convolutions".
+ return (1.0 - kernel_interpolation_factor) * sum1
+ + kernel_interpolation_factor * sum2;
+}
+
+#if defined(ARCH_CPU_ARM_FAMILY) && defined(USE_NEON)
+float SincResampler::Convolve_NEON(const float* input_ptr, const float* k1,
+ const float* k2,
+ double kernel_interpolation_factor) {
+ float32x4_t m_input;
+ float32x4_t m_sums1 = vmovq_n_f32(0);
+ float32x4_t m_sums2 = vmovq_n_f32(0);
+
+ const float* upper = input_ptr + kKernelSize;
+ for (; input_ptr < upper; ) {
+ m_input = vld1q_f32(input_ptr);
+ input_ptr += 4;
+ m_sums1 = vmlaq_f32(m_sums1, m_input, vld1q_f32(k1));
+ k1 += 4;
+ m_sums2 = vmlaq_f32(m_sums2, m_input, vld1q_f32(k2));
+ k2 += 4;
+ }
+
+ // Linearly interpolate the two "convolutions".
+ m_sums1 = vmlaq_f32(
+ vmulq_f32(m_sums1, vmovq_n_f32(1.0 - kernel_interpolation_factor)),
+ m_sums2, vmovq_n_f32(kernel_interpolation_factor));
+
+ // Sum components together.
+ float32x2_t m_half = vadd_f32(vget_high_f32(m_sums1), vget_low_f32(m_sums1));
+ return vget_lane_f32(vpadd_f32(m_half, m_half), 0);
+}
+#endif
+
+} // namespace media
diff --git a/chromium/media/base/sinc_resampler.h b/chromium/media/base/sinc_resampler.h
new file mode 100644
index 00000000000..facd1a106df
--- /dev/null
+++ b/chromium/media/base/sinc_resampler.h
@@ -0,0 +1,143 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_SINC_RESAMPLER_H_
+#define MEDIA_BASE_SINC_RESAMPLER_H_
+
+#include "base/callback.h"
+#include "base/gtest_prod_util.h"
+#include "base/memory/aligned_memory.h"
+#include "base/memory/scoped_ptr.h"
+#include "build/build_config.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+// SincResampler is a high-quality single-channel sample-rate converter.
+class MEDIA_EXPORT SincResampler {
+ public:
+ enum {
+ // The kernel size can be adjusted for quality (higher is better) at the
+ // expense of performance. Must be a multiple of 32.
+ // TODO(dalecurtis): Test performance to see if we can jack this up to 64+.
+ kKernelSize = 32,
+
+    // Default request size. Affects how often and for how many frames
+    // SincResampler calls back for input. Must be greater than kKernelSize.
+ kDefaultRequestSize = 512,
+
+ // The kernel offset count is used for interpolation and is the number of
+ // sub-sample kernel shifts. Can be adjusted for quality (higher is better)
+ // at the expense of allocating more memory.
+ kKernelOffsetCount = 32,
+ kKernelStorageSize = kKernelSize * (kKernelOffsetCount + 1),
+ };
+
+ // Selects runtime specific CPU features like SSE. Must be called before
+ // using SincResampler.
+ static void InitializeCPUSpecificFeatures();
+
+ // Callback type for providing more data into the resampler. Expects |frames|
+ // of data to be rendered into |destination|; zero padded if not enough frames
+ // are available to satisfy the request.
+ typedef base::Callback<void(int frames, float* destination)> ReadCB;
+
+ // Constructs a SincResampler with the specified |read_cb|, which is used to
+ // acquire audio data for resampling. |io_sample_rate_ratio| is the ratio
+ // of input / output sample rates. |request_frames| controls the size in
+ // frames of the buffer requested by each |read_cb| call. The value must be
+ // greater than kKernelSize. Specify kDefaultRequestSize if there are no
+ // request size constraints.
+ SincResampler(double io_sample_rate_ratio,
+ int request_frames,
+ const ReadCB& read_cb);
+ virtual ~SincResampler();
+
+ // Resample |frames| of data from |read_cb_| into |destination|.
+ void Resample(int frames, float* destination);
+
+ // The maximum size in frames that guarantees Resample() will only make a
+ // single call to |read_cb_| for more data.
+ int ChunkSize() const;
+
+ // Flush all buffered data and reset internal indices. Not thread safe, do
+ // not call while Resample() is in progress.
+ void Flush();
+
+ // Update |io_sample_rate_ratio_|. SetRatio() will cause a reconstruction of
+ // the kernels used for resampling. Not thread safe, do not call while
+ // Resample() is in progress.
+ void SetRatio(double io_sample_rate_ratio);
+
+ float* get_kernel_for_testing() { return kernel_storage_.get(); }
+
+ private:
+ FRIEND_TEST_ALL_PREFIXES(SincResamplerTest, Convolve);
+ FRIEND_TEST_ALL_PREFIXES(SincResamplerTest, ConvolveBenchmark);
+
+ void InitializeKernel();
+ void UpdateRegions(bool second_load);
+
+ // Compute convolution of |k1| and |k2| over |input_ptr|, resultant sums are
+ // linearly interpolated using |kernel_interpolation_factor|. On x86, the
+ // underlying implementation is chosen at run time based on SSE support. On
+ // ARM, NEON support is chosen at compile time based on compilation flags.
+ static float Convolve_C(const float* input_ptr, const float* k1,
+ const float* k2, double kernel_interpolation_factor);
+#if defined(ARCH_CPU_X86_FAMILY)
+ static float Convolve_SSE(const float* input_ptr, const float* k1,
+ const float* k2,
+ double kernel_interpolation_factor);
+#elif defined(ARCH_CPU_ARM_FAMILY) && defined(USE_NEON)
+ static float Convolve_NEON(const float* input_ptr, const float* k1,
+ const float* k2,
+ double kernel_interpolation_factor);
+#endif
+
+ // The ratio of input / output sample rates.
+ double io_sample_rate_ratio_;
+
+ // An index on the source input buffer with sub-sample precision. It must be
+ // double precision to avoid drift.
+ double virtual_source_idx_;
+
+ // The buffer is primed once at the very beginning of processing.
+ bool buffer_primed_;
+
+ // Source of data for resampling.
+ const ReadCB read_cb_;
+
+ // The size (in samples) to request from each |read_cb_| execution.
+ const int request_frames_;
+
+ // The number of source frames processed per pass.
+ int block_size_;
+
+ // The size (in samples) of the internal buffer used by the resampler.
+ const int input_buffer_size_;
+
+  // Contains kKernelOffsetCount + 1 kernels back-to-back, each of size
+  // kKernelSize. The kernel offsets are sub-sample shifts of a windowed sinc
+  // shifted from 0.0 to 1.0 sample.
+ scoped_ptr<float[], base::ScopedPtrAlignedFree> kernel_storage_;
+ scoped_ptr<float[], base::ScopedPtrAlignedFree> kernel_pre_sinc_storage_;
+ scoped_ptr<float[], base::ScopedPtrAlignedFree> kernel_window_storage_;
+
+ // Data from the source is copied into this buffer for each processing pass.
+ scoped_ptr<float[], base::ScopedPtrAlignedFree> input_buffer_;
+
+ // Pointers to the various regions inside |input_buffer_|. See the diagram at
+ // the top of the .cc file for more information.
+ float* r0_;
+ float* const r1_;
+ float* const r2_;
+ float* r3_;
+ float* r4_;
+
+ DISALLOW_COPY_AND_ASSIGN(SincResampler);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_SINC_RESAMPLER_H_
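
A minimal usage sketch (the silence-producing callback is illustrative): the ratio argument is the input rate divided by the output rate, and a single Resample() call may invoke the callback several times.

    #include <cstring>

    #include "base/bind.h"
    #include "media/base/sinc_resampler.h"

    // Hypothetical source; a real client renders audio into |destination| and
    // zero-pads when it runs out, per the ReadCB contract above.
    static void ProvideInput(int frames, float* destination) {
      memset(destination, 0, sizeof(float) * frames);
    }

    void ResampleExample() {
      // One-time CPU feature detection; required before first use.
      media::SincResampler::InitializeCPUSpecificFeatures();

      // Downsample 48 kHz input to 44.1 kHz output.
      media::SincResampler resampler(
          48000.0 / 44100.0, media::SincResampler::kDefaultRequestSize,
          base::Bind(&ProvideInput));
      float output[512];
      resampler.Resample(512, output);
    }
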
diff --git a/chromium/media/base/sinc_resampler_unittest.cc b/chromium/media/base/sinc_resampler_unittest.cc
new file mode 100644
index 00000000000..8b89a5d3808
--- /dev/null
+++ b/chromium/media/base/sinc_resampler_unittest.cc
@@ -0,0 +1,444 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// MSVC++ requires this to be set before any other includes to get M_PI.
+#define _USE_MATH_DEFINES
+
+#include <cmath>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/command_line.h"
+#include "base/cpu.h"
+#include "base/logging.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/stringize_macros.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+#include "media/base/sinc_resampler.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using testing::_;
+
+namespace media {
+
+static const double kSampleRateRatio = 192000.0 / 44100.0;
+static const double kKernelInterpolationFactor = 0.5;
+
+// Command line switch for runtime adjustment of ConvolveBenchmark iterations.
+static const char kConvolveIterations[] = "convolve-iterations";
+
+// Mock audio source used to verify the resampler's read callbacks.
+class MockSource {
+ public:
+ MOCK_METHOD2(ProvideInput, void(int frames, float* destination));
+};
+
+ACTION(ClearBuffer) {
+ memset(arg1, 0, arg0 * sizeof(float));
+}
+
+ACTION(FillBuffer) {
+ // Value chosen arbitrarily such that SincResampler resamples it to something
+ // easily representable on all platforms; e.g., using kSampleRateRatio this
+ // becomes 1.81219.
+ memset(arg1, 64, arg0 * sizeof(float));
+}
+
+// Tests that requesting multiples of ChunkSize() frames results in the proper
+// number of callbacks.
+TEST(SincResamplerTest, ChunkedResample) {
+ MockSource mock_source;
+
+ // Choose a high ratio of input to output samples which will result in quick
+ // exhaustion of SincResampler's internal buffers.
+ SincResampler resampler(
+ kSampleRateRatio, SincResampler::kDefaultRequestSize,
+ base::Bind(&MockSource::ProvideInput, base::Unretained(&mock_source)));
+
+ static const int kChunks = 2;
+ int max_chunk_size = resampler.ChunkSize() * kChunks;
+ scoped_ptr<float[]> resampled_destination(new float[max_chunk_size]);
+
+ // Verify requesting ChunkSize() frames causes a single callback.
+ EXPECT_CALL(mock_source, ProvideInput(_, _))
+ .Times(1).WillOnce(ClearBuffer());
+ resampler.Resample(resampler.ChunkSize(), resampled_destination.get());
+
+ // Verify requesting kChunks * ChunkSize() frames causes kChunks callbacks.
+ testing::Mock::VerifyAndClear(&mock_source);
+ EXPECT_CALL(mock_source, ProvideInput(_, _))
+ .Times(kChunks).WillRepeatedly(ClearBuffer());
+ resampler.Resample(max_chunk_size, resampled_destination.get());
+}
+
+// Test flush resets the internal state properly.
+TEST(SincResamplerTest, Flush) {
+ MockSource mock_source;
+ SincResampler resampler(
+ kSampleRateRatio, SincResampler::kDefaultRequestSize,
+ base::Bind(&MockSource::ProvideInput, base::Unretained(&mock_source)));
+ scoped_ptr<float[]> resampled_destination(new float[resampler.ChunkSize()]);
+
+ // Fill the resampler with junk data.
+ EXPECT_CALL(mock_source, ProvideInput(_, _))
+ .Times(1).WillOnce(FillBuffer());
+ resampler.Resample(resampler.ChunkSize() / 2, resampled_destination.get());
+ ASSERT_NE(resampled_destination[0], 0);
+
+ // Flush and request more data, which should all be zeros now.
+ resampler.Flush();
+ testing::Mock::VerifyAndClear(&mock_source);
+ EXPECT_CALL(mock_source, ProvideInput(_, _))
+ .Times(1).WillOnce(ClearBuffer());
+ resampler.Resample(resampler.ChunkSize() / 2, resampled_destination.get());
+ for (int i = 0; i < resampler.ChunkSize() / 2; ++i)
+ ASSERT_FLOAT_EQ(resampled_destination[i], 0);
+}
+
+// Benchmark the kernel reconstruction performed by SetRatio().
+TEST(SincResamplerTest, DISABLED_SetRatioBench) {
+ MockSource mock_source;
+ SincResampler resampler(
+ kSampleRateRatio, SincResampler::kDefaultRequestSize,
+ base::Bind(&MockSource::ProvideInput, base::Unretained(&mock_source)));
+
+ base::TimeTicks start = base::TimeTicks::HighResNow();
+ for (int i = 1; i < 10000; ++i)
+ resampler.SetRatio(1.0 / i);
+ double total_time_c_ms =
+ (base::TimeTicks::HighResNow() - start).InMillisecondsF();
+ printf("SetRatio() took %.2fms.\n", total_time_c_ms);
+}
+
+
+// Define platform independent function name for Convolve* tests.
+#if defined(ARCH_CPU_X86_FAMILY)
+#define CONVOLVE_FUNC Convolve_SSE
+#elif defined(ARCH_CPU_ARM_FAMILY) && defined(USE_NEON)
+#define CONVOLVE_FUNC Convolve_NEON
+#endif
+
+// Ensure various optimized Convolve() methods return the same value. Only run
+// this test if other optimized methods exist, otherwise the default Convolve()
+// will be tested by the parameterized SincResampler tests below.
+#if defined(CONVOLVE_FUNC)
+TEST(SincResamplerTest, Convolve) {
+#if defined(ARCH_CPU_X86_FAMILY)
+ ASSERT_TRUE(base::CPU().has_sse());
+#endif
+
+ // Initialize a dummy resampler.
+ MockSource mock_source;
+ SincResampler resampler(
+ kSampleRateRatio, SincResampler::kDefaultRequestSize,
+ base::Bind(&MockSource::ProvideInput, base::Unretained(&mock_source)));
+
+ // The optimized Convolve methods are slightly more precise than Convolve_C(),
+ // so comparison must be done using an epsilon.
+ static const double kEpsilon = 0.00000005;
+
+ // Use a kernel from SincResampler as input and kernel data, this has the
+ // benefit of already being properly sized and aligned for Convolve_SSE().
+ double result = resampler.Convolve_C(
+ resampler.kernel_storage_.get(), resampler.kernel_storage_.get(),
+ resampler.kernel_storage_.get(), kKernelInterpolationFactor);
+ double result2 = resampler.CONVOLVE_FUNC(
+ resampler.kernel_storage_.get(), resampler.kernel_storage_.get(),
+ resampler.kernel_storage_.get(), kKernelInterpolationFactor);
+ EXPECT_NEAR(result2, result, kEpsilon);
+
+ // Test Convolve() w/ unaligned input pointer.
+ result = resampler.Convolve_C(
+ resampler.kernel_storage_.get() + 1, resampler.kernel_storage_.get(),
+ resampler.kernel_storage_.get(), kKernelInterpolationFactor);
+ result2 = resampler.CONVOLVE_FUNC(
+ resampler.kernel_storage_.get() + 1, resampler.kernel_storage_.get(),
+ resampler.kernel_storage_.get(), kKernelInterpolationFactor);
+ EXPECT_NEAR(result2, result, kEpsilon);
+}
+#endif
+
+// Benchmark for the various Convolve() methods. Make sure to build with
+// branding=Chrome so that DCHECKs are compiled out when benchmarking. Original
+// benchmarks were run with --convolve-iterations=50000000.
+TEST(SincResamplerTest, ConvolveBenchmark) {
+ // Initialize a dummy resampler.
+ MockSource mock_source;
+ SincResampler resampler(
+ kSampleRateRatio, SincResampler::kDefaultRequestSize,
+ base::Bind(&MockSource::ProvideInput, base::Unretained(&mock_source)));
+
+ // Retrieve benchmark iterations from command line.
+ int convolve_iterations = 10;
+ std::string iterations(CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
+ kConvolveIterations));
+ if (!iterations.empty())
+ base::StringToInt(iterations, &convolve_iterations);
+
+ printf("Benchmarking %d iterations:\n", convolve_iterations);
+
+ // Benchmark Convolve_C().
+ base::TimeTicks start = base::TimeTicks::HighResNow();
+ for (int i = 0; i < convolve_iterations; ++i) {
+ resampler.Convolve_C(
+ resampler.kernel_storage_.get(), resampler.kernel_storage_.get(),
+ resampler.kernel_storage_.get(), kKernelInterpolationFactor);
+ }
+ double total_time_c_ms =
+ (base::TimeTicks::HighResNow() - start).InMillisecondsF();
+ printf("Convolve_C took %.2fms.\n", total_time_c_ms);
+
+#if defined(CONVOLVE_FUNC)
+#if defined(ARCH_CPU_X86_FAMILY)
+ ASSERT_TRUE(base::CPU().has_sse());
+#endif
+
+ // Benchmark with unaligned input pointer.
+ start = base::TimeTicks::HighResNow();
+ for (int j = 0; j < convolve_iterations; ++j) {
+ resampler.CONVOLVE_FUNC(
+ resampler.kernel_storage_.get() + 1, resampler.kernel_storage_.get(),
+ resampler.kernel_storage_.get(), kKernelInterpolationFactor);
+ }
+ double total_time_optimized_unaligned_ms =
+ (base::TimeTicks::HighResNow() - start).InMillisecondsF();
+ printf(STRINGIZE(CONVOLVE_FUNC) " (unaligned) took %.2fms; which is %.2fx "
+ "faster than Convolve_C.\n", total_time_optimized_unaligned_ms,
+ total_time_c_ms / total_time_optimized_unaligned_ms);
+
+ // Benchmark with aligned input pointer.
+ start = base::TimeTicks::HighResNow();
+ for (int j = 0; j < convolve_iterations; ++j) {
+ resampler.CONVOLVE_FUNC(
+ resampler.kernel_storage_.get(), resampler.kernel_storage_.get(),
+ resampler.kernel_storage_.get(), kKernelInterpolationFactor);
+ }
+ double total_time_optimized_aligned_ms =
+ (base::TimeTicks::HighResNow() - start).InMillisecondsF();
+ printf(STRINGIZE(CONVOLVE_FUNC) " (aligned) took %.2fms; which is %.2fx "
+ "faster than Convolve_C and %.2fx faster than "
+ STRINGIZE(CONVOLVE_FUNC) " (unaligned).\n",
+ total_time_optimized_aligned_ms,
+ total_time_c_ms / total_time_optimized_aligned_ms,
+ total_time_optimized_unaligned_ms / total_time_optimized_aligned_ms);
+#endif
+}
+
+#undef CONVOLVE_FUNC
+
+// Fake audio source for testing the resampler. Generates a sinusoidal linear
+// chirp (http://en.wikipedia.org/wiki/Chirp) which can be tuned to stress the
+// resampler for the specific sample rate conversion being used.
+class SinusoidalLinearChirpSource {
+ public:
+ SinusoidalLinearChirpSource(int sample_rate,
+ int samples,
+ double max_frequency)
+ : sample_rate_(sample_rate),
+ total_samples_(samples),
+ max_frequency_(max_frequency),
+ current_index_(0) {
+ // Chirp rate.
+ double duration = static_cast<double>(total_samples_) / sample_rate_;
+ k_ = (max_frequency_ - kMinFrequency) / duration;
+ }
+
+ virtual ~SinusoidalLinearChirpSource() {}
+
+ void ProvideInput(int frames, float* destination) {
+ for (int i = 0; i < frames; ++i, ++current_index_) {
+ // Filter out frequencies higher than Nyquist.
+ if (Frequency(current_index_) > 0.5 * sample_rate_) {
+ destination[i] = 0;
+ } else {
+ // Calculate time in seconds.
+ double t = static_cast<double>(current_index_) / sample_rate_;
+
+ // Sinusoidal linear chirp.
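+        // The phase term kMinFrequency * t + (k_ / 2) * t * t has derivative
+        // kMinFrequency + k_ * t, so the instantaneous frequency sweeps
+        // linearly from kMinFrequency up to max_frequency_.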
+ destination[i] = sin(2 * M_PI * (kMinFrequency * t + (k_ / 2) * t * t));
+ }
+ }
+ }
+
+ double Frequency(int position) {
+ return kMinFrequency + position * (max_frequency_ - kMinFrequency)
+ / total_samples_;
+ }
+
+ private:
+ enum {
+ kMinFrequency = 5
+ };
+
+ double sample_rate_;
+ int total_samples_;
+ double max_frequency_;
+ double k_;
+ int current_index_;
+
+ DISALLOW_COPY_AND_ASSIGN(SinusoidalLinearChirpSource);
+};
+
+typedef std::tr1::tuple<int, int, double, double> SincResamplerTestData;
+class SincResamplerTest
+ : public testing::TestWithParam<SincResamplerTestData> {
+ public:
+ SincResamplerTest()
+ : input_rate_(std::tr1::get<0>(GetParam())),
+ output_rate_(std::tr1::get<1>(GetParam())),
+ rms_error_(std::tr1::get<2>(GetParam())),
+ low_freq_error_(std::tr1::get<3>(GetParam())) {
+ }
+
+ virtual ~SincResamplerTest() {}
+
+ protected:
+ int input_rate_;
+ int output_rate_;
+ double rms_error_;
+ double low_freq_error_;
+};
+
+// Tests resampling using a given input and output sample rate.
+TEST_P(SincResamplerTest, Resample) {
+ // Make comparisons using one second of data.
+ static const double kTestDurationSecs = 1;
+ int input_samples = kTestDurationSecs * input_rate_;
+ int output_samples = kTestDurationSecs * output_rate_;
+
+ // Nyquist frequency for the input sampling rate.
+ double input_nyquist_freq = 0.5 * input_rate_;
+
+ // Source for data to be resampled.
+ SinusoidalLinearChirpSource resampler_source(
+ input_rate_, input_samples, input_nyquist_freq);
+
+ const double io_ratio = input_rate_ / static_cast<double>(output_rate_);
+ SincResampler resampler(
+ io_ratio, SincResampler::kDefaultRequestSize,
+ base::Bind(&SinusoidalLinearChirpSource::ProvideInput,
+ base::Unretained(&resampler_source)));
+
+  // Force an update to the sample rate ratio to ensure dynamic sample rate
+  // changes are working correctly.
+ scoped_ptr<float[]> kernel(new float[SincResampler::kKernelStorageSize]);
+  memcpy(kernel.get(), resampler.get_kernel_for_testing(),
+         SincResampler::kKernelStorageSize * sizeof(float));
+  resampler.SetRatio(M_PI);
+  ASSERT_NE(0, memcmp(kernel.get(), resampler.get_kernel_for_testing(),
+                      SincResampler::kKernelStorageSize * sizeof(float)));
+  resampler.SetRatio(io_ratio);
+  ASSERT_EQ(0, memcmp(kernel.get(), resampler.get_kernel_for_testing(),
+                      SincResampler::kKernelStorageSize * sizeof(float)));
+
+ // TODO(dalecurtis): If we switch to AVX/SSE optimization, we'll need to
+ // allocate these on 32-byte boundaries and ensure they're sized % 32 bytes.
+ scoped_ptr<float[]> resampled_destination(new float[output_samples]);
+ scoped_ptr<float[]> pure_destination(new float[output_samples]);
+
+ // Generate resampled signal.
+ resampler.Resample(output_samples, resampled_destination.get());
+
+ // Generate pure signal.
+ SinusoidalLinearChirpSource pure_source(
+ output_rate_, output_samples, input_nyquist_freq);
+ pure_source.ProvideInput(output_samples, pure_destination.get());
+
+  // Fractions of the Nyquist frequency (0.5 * min(input_rate_, output_rate_))
+  // that bound what we refer to as the low and high frequency ranges.
+ static const double kLowFrequencyNyquistRange = 0.7;
+ static const double kHighFrequencyNyquistRange = 0.9;
+
+ // Calculate Root-Mean-Square-Error and maximum error for the resampling.
+ double sum_of_squares = 0;
+ double low_freq_max_error = 0;
+ double high_freq_max_error = 0;
+ int minimum_rate = std::min(input_rate_, output_rate_);
+ double low_frequency_range = kLowFrequencyNyquistRange * 0.5 * minimum_rate;
+ double high_frequency_range = kHighFrequencyNyquistRange * 0.5 * minimum_rate;
+ for (int i = 0; i < output_samples; ++i) {
+ double error = fabs(resampled_destination[i] - pure_destination[i]);
+
+ if (pure_source.Frequency(i) < low_frequency_range) {
+ if (error > low_freq_max_error)
+ low_freq_max_error = error;
+ } else if (pure_source.Frequency(i) < high_frequency_range) {
+ if (error > high_freq_max_error)
+ high_freq_max_error = error;
+ }
+ // TODO(dalecurtis): Sanity check frequencies > kHighFrequencyNyquistRange.
+
+ sum_of_squares += error * error;
+ }
+
+ double rms_error = sqrt(sum_of_squares / output_samples);
+
+  // Convert each error to dBFS.
+  #define DBFS(x) (20 * log10(x))
+ rms_error = DBFS(rms_error);
+ low_freq_max_error = DBFS(low_freq_max_error);
+ high_freq_max_error = DBFS(high_freq_max_error);
+
+ EXPECT_LE(rms_error, rms_error_);
+ EXPECT_LE(low_freq_max_error, low_freq_error_);
+
+  // All conversions currently have a high frequency error around -6 dBFS.
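+  // (A linear error of 0.5 is 20 * log10(0.5) = -6.02 dBFS.)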
+ static const double kHighFrequencyMaxError = -6.02;
+ EXPECT_LE(high_freq_max_error, kHighFrequencyMaxError);
+}
+
+// Almost all conversions have an RMS error of around -14 dBFS.
+static const double kResamplingRMSError = -14.58;
+
+// Thresholds chosen arbitrarily based on what each resampling reported during
+// testing. All thresholds are in dBFS, http://en.wikipedia.org/wiki/DBFS.
+INSTANTIATE_TEST_CASE_P(
+ SincResamplerTest, SincResamplerTest, testing::Values(
+ // To 44.1kHz
+ std::tr1::make_tuple(8000, 44100, kResamplingRMSError, -62.73),
+ std::tr1::make_tuple(11025, 44100, kResamplingRMSError, -72.19),
+ std::tr1::make_tuple(16000, 44100, kResamplingRMSError, -62.54),
+ std::tr1::make_tuple(22050, 44100, kResamplingRMSError, -73.53),
+ std::tr1::make_tuple(32000, 44100, kResamplingRMSError, -63.32),
+ std::tr1::make_tuple(44100, 44100, kResamplingRMSError, -73.53),
+ std::tr1::make_tuple(48000, 44100, -15.01, -64.04),
+ std::tr1::make_tuple(96000, 44100, -18.49, -25.51),
+ std::tr1::make_tuple(192000, 44100, -20.50, -13.31),
+
+ // To 48kHz
+ std::tr1::make_tuple(8000, 48000, kResamplingRMSError, -63.43),
+ std::tr1::make_tuple(11025, 48000, kResamplingRMSError, -62.61),
+ std::tr1::make_tuple(16000, 48000, kResamplingRMSError, -63.96),
+ std::tr1::make_tuple(22050, 48000, kResamplingRMSError, -62.42),
+ std::tr1::make_tuple(32000, 48000, kResamplingRMSError, -64.04),
+ std::tr1::make_tuple(44100, 48000, kResamplingRMSError, -62.63),
+ std::tr1::make_tuple(48000, 48000, kResamplingRMSError, -73.52),
+ std::tr1::make_tuple(96000, 48000, -18.40, -28.44),
+ std::tr1::make_tuple(192000, 48000, -20.43, -14.11),
+
+ // To 96kHz
+ std::tr1::make_tuple(8000, 96000, kResamplingRMSError, -63.19),
+ std::tr1::make_tuple(11025, 96000, kResamplingRMSError, -62.61),
+ std::tr1::make_tuple(16000, 96000, kResamplingRMSError, -63.39),
+ std::tr1::make_tuple(22050, 96000, kResamplingRMSError, -62.42),
+ std::tr1::make_tuple(32000, 96000, kResamplingRMSError, -63.95),
+ std::tr1::make_tuple(44100, 96000, kResamplingRMSError, -62.63),
+ std::tr1::make_tuple(48000, 96000, kResamplingRMSError, -73.52),
+ std::tr1::make_tuple(96000, 96000, kResamplingRMSError, -73.52),
+ std::tr1::make_tuple(192000, 96000, kResamplingRMSError, -28.41),
+
+ // To 192kHz
+ std::tr1::make_tuple(8000, 192000, kResamplingRMSError, -63.10),
+ std::tr1::make_tuple(11025, 192000, kResamplingRMSError, -62.61),
+ std::tr1::make_tuple(16000, 192000, kResamplingRMSError, -63.14),
+ std::tr1::make_tuple(22050, 192000, kResamplingRMSError, -62.42),
+ std::tr1::make_tuple(32000, 192000, kResamplingRMSError, -63.38),
+ std::tr1::make_tuple(44100, 192000, kResamplingRMSError, -62.63),
+ std::tr1::make_tuple(48000, 192000, kResamplingRMSError, -73.44),
+ std::tr1::make_tuple(96000, 192000, kResamplingRMSError, -73.52),
+ std::tr1::make_tuple(192000, 192000, kResamplingRMSError, -73.52)));
+
+} // namespace media
diff --git a/chromium/media/base/stream_parser.cc b/chromium/media/base/stream_parser.cc
new file mode 100644
index 00000000000..12409194fed
--- /dev/null
+++ b/chromium/media/base/stream_parser.cc
@@ -0,0 +1,13 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/stream_parser.h"
+
+namespace media {
+
+StreamParser::StreamParser() {}
+
+StreamParser::~StreamParser() {}
+
+} // namespace media
diff --git a/chromium/media/base/stream_parser.h b/chromium/media/base/stream_parser.h
new file mode 100644
index 00000000000..a0fbb71a924
--- /dev/null
+++ b/chromium/media/base/stream_parser.h
@@ -0,0 +1,110 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_STREAM_PARSER_H_
+#define MEDIA_BASE_STREAM_PARSER_H_
+
+#include <deque>
+#include <string>
+
+#include "base/callback_forward.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/time/time.h"
+#include "media/base/media_export.h"
+#include "media/base/media_log.h"
+#include "media/base/text_track.h"
+
+namespace media {
+
+class AudioDecoderConfig;
+class StreamParserBuffer;
+class VideoDecoderConfig;
+
+// Abstract interface for parsing media byte streams.
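+//
+// A sketch of typical usage, assuming some concrete subclass; MyParser and
+// the bound callback targets are hypothetical:
+//
+//   scoped_ptr<StreamParser> parser(new MyParser());
+//   parser->Init(init_cb, config_cb, new_buffers_cb, text_cb, need_key_cb,
+//                add_text_track_cb, new_segment_cb, end_of_segment_cb,
+//                log_cb);
+//   while (HaveMoreData())
+//     if (!parser->Parse(data, size))
+//       HandleParseError();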
+class MEDIA_EXPORT StreamParser {
+ public:
+ typedef std::deque<scoped_refptr<StreamParserBuffer> > BufferQueue;
+
+ StreamParser();
+ virtual ~StreamParser();
+
+ // Indicates completion of parser initialization.
+  // First parameter - True if initialization was successful; false if an
+  //                    error occurred.
+ // Second parameter - Indicates the stream duration. Only contains a valid
+ // value if the first parameter is true.
+ typedef base::Callback<void(bool, base::TimeDelta)> InitCB;
+
+ // Indicates when new stream configurations have been parsed.
+ // First parameter - The new audio configuration. If the config is not valid
+ // then it means that there isn't an audio stream.
+ // Second parameter - The new video configuration. If the config is not valid
+ // then it means that there isn't an audio stream.
+ // Return value - True if the new configurations are accepted.
+ // False if the new configurations are not supported
+ // and indicates that a parsing error should be signalled.
+ typedef base::Callback<bool(const AudioDecoderConfig&,
+ const VideoDecoderConfig&)> NewConfigCB;
+
+ // New stream buffers have been parsed.
+ // First parameter - A queue of newly parsed audio buffers.
+ // Second parameter - A queue of newly parsed video buffers.
+ // Return value - True indicates that the buffers are accepted.
+ // False if something was wrong with the buffers and a parsing
+ // error should be signalled.
+ typedef base::Callback<bool(const BufferQueue&,
+ const BufferQueue&)> NewBuffersCB;
+
+ // New stream buffers of inband text have been parsed.
+ // First parameter - The text track to which these cues will be added.
+ // Second parameter - A queue of newly parsed buffers.
+ // Return value - True indicates that the buffers are accepted.
+ // False if something was wrong with the buffers and a parsing
+ // error should be signalled.
+ typedef base::Callback<bool(TextTrack*, const BufferQueue&)> NewTextBuffersCB;
+
+ // Signals the beginning of a new media segment.
+ typedef base::Callback<void()> NewMediaSegmentCB;
+
+ // A new potentially encrypted stream has been parsed.
+ // First parameter - The type of the initialization data associated with the
+ // stream.
+ // Second parameter - The initialization data associated with the stream.
+ // Third parameter - Number of bytes of the initialization data.
+ typedef base::Callback<void(const std::string&,
+ scoped_ptr<uint8[]>, int)> NeedKeyCB;
+
+ // Initialize the parser with necessary callbacks. Must be called before any
+ // data is passed to Parse(). |init_cb| will be called once enough data has
+ // been parsed to determine the initial stream configurations, presentation
+ // start time, and duration.
+ virtual void Init(const InitCB& init_cb,
+ const NewConfigCB& config_cb,
+ const NewBuffersCB& new_buffers_cb,
+ const NewTextBuffersCB& text_cb,
+ const NeedKeyCB& need_key_cb,
+ const AddTextTrackCB& add_text_track_cb,
+ const NewMediaSegmentCB& new_segment_cb,
+ const base::Closure& end_of_segment_cb,
+ const LogCB& log_cb) = 0;
+
+ // Called when a seek occurs. This flushes the current parser state
+ // and puts the parser in a state where it can receive data for the new seek
+ // point.
+ virtual void Flush() = 0;
+
+ // Called when there is new data to parse.
+ //
+ // Returns true if the parse succeeds.
+ virtual bool Parse(const uint8* buf, int size) = 0;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(StreamParser);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_STREAM_PARSER_H_
diff --git a/chromium/media/base/stream_parser_buffer.cc b/chromium/media/base/stream_parser_buffer.cc
new file mode 100644
index 00000000000..bb46ef516ec
--- /dev/null
+++ b/chromium/media/base/stream_parser_buffer.cc
@@ -0,0 +1,66 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/stream_parser_buffer.h"
+
+#include "base/logging.h"
+#include "media/base/buffers.h"
+
+namespace media {
+
+scoped_refptr<StreamParserBuffer> StreamParserBuffer::CreateEOSBuffer() {
+ return make_scoped_refptr(new StreamParserBuffer(NULL, 0, NULL, 0, false));
+}
+
+scoped_refptr<StreamParserBuffer> StreamParserBuffer::CopyFrom(
+ const uint8* data, int data_size, bool is_keyframe) {
+ return make_scoped_refptr(
+ new StreamParserBuffer(data, data_size, NULL, 0, is_keyframe));
+}
+
+scoped_refptr<StreamParserBuffer> StreamParserBuffer::CopyFrom(
+ const uint8* data, int data_size,
+ const uint8* side_data, int side_data_size, bool is_keyframe) {
+ return make_scoped_refptr(
+ new StreamParserBuffer(data, data_size, side_data, side_data_size,
+ is_keyframe));
+}
+
+base::TimeDelta StreamParserBuffer::GetDecodeTimestamp() const {
+ if (decode_timestamp_ == kNoTimestamp())
+ return timestamp();
+ return decode_timestamp_;
+}
+
+void StreamParserBuffer::SetDecodeTimestamp(const base::TimeDelta& timestamp) {
+ decode_timestamp_ = timestamp;
+}
+
+StreamParserBuffer::StreamParserBuffer(const uint8* data, int data_size,
+ const uint8* side_data,
+ int side_data_size, bool is_keyframe)
+ : DecoderBuffer(data, data_size, side_data, side_data_size),
+ is_keyframe_(is_keyframe),
+ decode_timestamp_(kNoTimestamp()),
+ config_id_(kInvalidConfigId) {
+ // TODO(scherkus): Should DataBuffer constructor accept a timestamp and
+ // duration to force clients to set them? Today they end up being zero which
+ // is both a common and valid value and could lead to bugs.
+ if (data) {
+ set_duration(kNoTimestamp());
+ }
+}
+
+StreamParserBuffer::~StreamParserBuffer() {
+}
+
+int StreamParserBuffer::GetConfigId() const {
+ return config_id_;
+}
+
+void StreamParserBuffer::SetConfigId(int config_id) {
+ config_id_ = config_id;
+}
+
+} // namespace media
diff --git a/chromium/media/base/stream_parser_buffer.h b/chromium/media/base/stream_parser_buffer.h
new file mode 100644
index 00000000000..8899f11216d
--- /dev/null
+++ b/chromium/media/base/stream_parser_buffer.h
@@ -0,0 +1,50 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_STREAM_PARSER_BUFFER_H_
+#define MEDIA_BASE_STREAM_PARSER_BUFFER_H_
+
+#include "media/base/decoder_buffer.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
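+// Buffer type produced by StreamParser implementations: a DecoderBuffer plus
+// a keyframe flag, an optional decode timestamp, and a decoder config ID.
+// A minimal usage sketch (the values are illustrative):
+//
+//   scoped_refptr<StreamParserBuffer> buffer =
+//       StreamParserBuffer::CopyFrom(data, size, true);
+//   buffer->set_timestamp(pts);
+//   buffer->SetDecodeTimestamp(dts);
+//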
+class MEDIA_EXPORT StreamParserBuffer : public DecoderBuffer {
+ public:
+ // Value used to signal an invalid decoder config ID.
+ enum { kInvalidConfigId = -1 };
+
+ static scoped_refptr<StreamParserBuffer> CreateEOSBuffer();
+ static scoped_refptr<StreamParserBuffer> CopyFrom(
+ const uint8* data, int data_size, bool is_keyframe);
+ static scoped_refptr<StreamParserBuffer> CopyFrom(
+ const uint8* data, int data_size,
+ const uint8* side_data, int side_data_size, bool is_keyframe);
+ bool IsKeyframe() const { return is_keyframe_; }
+
+ // Decode timestamp. If not explicitly set, or set to kNoTimestamp(), the
+ // value will be taken from the normal timestamp.
+ base::TimeDelta GetDecodeTimestamp() const;
+ void SetDecodeTimestamp(const base::TimeDelta& timestamp);
+
+ // Gets/sets the ID of the decoder config associated with this
+ // buffer.
+ int GetConfigId() const;
+ void SetConfigId(int config_id);
+
+ private:
+ StreamParserBuffer(const uint8* data, int data_size,
+ const uint8* side_data, int side_data_size,
+ bool is_keyframe);
+ virtual ~StreamParserBuffer();
+
+ bool is_keyframe_;
+ base::TimeDelta decode_timestamp_;
+ int config_id_;
+ DISALLOW_COPY_AND_ASSIGN(StreamParserBuffer);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_STREAM_PARSER_BUFFER_H_
diff --git a/chromium/media/base/test_data_util.cc b/chromium/media/base/test_data_util.cc
new file mode 100644
index 00000000000..55e82fc8635
--- /dev/null
+++ b/chromium/media/base/test_data_util.cc
@@ -0,0 +1,47 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/test_data_util.h"
+
+#include "base/file_util.h"
+#include "base/logging.h"
+#include "base/path_service.h"
+#include "media/base/decoder_buffer.h"
+
+namespace media {
+
+base::FilePath GetTestDataFilePath(const std::string& name) {
+ base::FilePath file_path;
+ CHECK(PathService::Get(base::DIR_SOURCE_ROOT, &file_path));
+
+ file_path = file_path.Append(FILE_PATH_LITERAL("media"))
+ .Append(FILE_PATH_LITERAL("test")).Append(FILE_PATH_LITERAL("data"))
+ .AppendASCII(name);
+ return file_path;
+}
+
+scoped_refptr<DecoderBuffer> ReadTestDataFile(const std::string& name) {
+ base::FilePath file_path;
+ CHECK(PathService::Get(base::DIR_SOURCE_ROOT, &file_path));
+
+ file_path = file_path.Append(FILE_PATH_LITERAL("media"))
+ .Append(FILE_PATH_LITERAL("test")).Append(FILE_PATH_LITERAL("data"))
+ .AppendASCII(name);
+
+ int64 tmp = 0;
+ CHECK(file_util::GetFileSize(file_path, &tmp))
+ << "Failed to get file size for '" << name << "'";
+
+ int file_size = static_cast<int>(tmp);
+
+ scoped_refptr<DecoderBuffer> buffer(new DecoderBuffer(file_size));
+ CHECK_EQ(file_size,
+ file_util::ReadFile(
+ file_path, reinterpret_cast<char*>(buffer->writable_data()),
+ file_size)) << "Failed to read '" << name << "'";
+
+ return buffer;
+}
+
+} // namespace media
diff --git a/chromium/media/base/test_data_util.h b/chromium/media/base/test_data_util.h
new file mode 100644
index 00000000000..8d51e96c736
--- /dev/null
+++ b/chromium/media/base/test_data_util.h
@@ -0,0 +1,32 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_TEST_DATA_UTIL_H_
+#define MEDIA_BASE_TEST_DATA_UTIL_H_
+
+#include <string>
+
+#include "base/basictypes.h"
+#include "base/files/file_path.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+
+namespace media {
+
+class DecoderBuffer;
+
+// Returns a file path for a file in the media/test/data directory.
+base::FilePath GetTestDataFilePath(const std::string& name);
+
+// Reads a test file from the media/test/data directory and returns its
+// contents in a DecoderBuffer. A DecoderBuffer (rather than a DataBuffer) is
+// used so that, no matter what a test does, the data is safe to pass to
+// FFmpeg methods.
+//
+// |name| - The name of the file to read.
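+//
+// A typical call (the file name is illustrative):
+//   scoped_refptr<DecoderBuffer> data = ReadTestDataFile("bear.webm");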
+scoped_refptr<DecoderBuffer> ReadTestDataFile(const std::string& name);
+
+} // namespace media
+
+#endif // MEDIA_BASE_TEST_DATA_UTIL_H_
diff --git a/chromium/media/base/test_helpers.cc b/chromium/media/base/test_helpers.cc
new file mode 100644
index 00000000000..43c5cfac748
--- /dev/null
+++ b/chromium/media/base/test_helpers.cc
@@ -0,0 +1,283 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/test_helpers.h"
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/message_loop/message_loop.h"
+#include "base/pickle.h"
+#include "base/test/test_timeouts.h"
+#include "base/time/time.h"
+#include "base/timer/timer.h"
+#include "media/base/audio_buffer.h"
+#include "media/base/bind_to_loop.h"
+#include "media/base/decoder_buffer.h"
+#include "ui/gfx/rect.h"
+
+using ::testing::_;
+using ::testing::StrictMock;
+
+namespace media {
+
+// Utility mock for testing methods expecting Closures and PipelineStatusCBs.
+class MockCallback : public base::RefCountedThreadSafe<MockCallback> {
+ public:
+ MockCallback();
+ MOCK_METHOD0(Run, void());
+ MOCK_METHOD1(RunWithStatus, void(PipelineStatus));
+
+ protected:
+ friend class base::RefCountedThreadSafe<MockCallback>;
+ virtual ~MockCallback();
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MockCallback);
+};
+
+MockCallback::MockCallback() {}
+MockCallback::~MockCallback() {}
+
+base::Closure NewExpectedClosure() {
+ StrictMock<MockCallback>* callback = new StrictMock<MockCallback>();
+ EXPECT_CALL(*callback, Run());
+ return base::Bind(&MockCallback::Run, callback);
+}
+
+PipelineStatusCB NewExpectedStatusCB(PipelineStatus status) {
+ StrictMock<MockCallback>* callback = new StrictMock<MockCallback>();
+ EXPECT_CALL(*callback, RunWithStatus(status));
+ return base::Bind(&MockCallback::RunWithStatus, callback);
+}
+
+WaitableMessageLoopEvent::WaitableMessageLoopEvent()
+ : message_loop_(base::MessageLoop::current()),
+ signaled_(false),
+ status_(PIPELINE_OK) {
+ DCHECK(message_loop_);
+}
+
+WaitableMessageLoopEvent::~WaitableMessageLoopEvent() {}
+
+base::Closure WaitableMessageLoopEvent::GetClosure() {
+ DCHECK_EQ(message_loop_, base::MessageLoop::current());
+ return BindToLoop(message_loop_->message_loop_proxy(), base::Bind(
+ &WaitableMessageLoopEvent::OnCallback, base::Unretained(this),
+ PIPELINE_OK));
+}
+
+PipelineStatusCB WaitableMessageLoopEvent::GetPipelineStatusCB() {
+ DCHECK_EQ(message_loop_, base::MessageLoop::current());
+ return BindToLoop(message_loop_->message_loop_proxy(), base::Bind(
+ &WaitableMessageLoopEvent::OnCallback, base::Unretained(this)));
+}
+
+void WaitableMessageLoopEvent::RunAndWait() {
+ RunAndWaitForStatus(PIPELINE_OK);
+}
+
+void WaitableMessageLoopEvent::RunAndWaitForStatus(PipelineStatus expected) {
+ DCHECK_EQ(message_loop_, base::MessageLoop::current());
+ if (signaled_) {
+ EXPECT_EQ(expected, status_);
+ return;
+ }
+
+ base::Timer timer(false, false);
+ timer.Start(FROM_HERE, TestTimeouts::action_timeout(), base::Bind(
+ &WaitableMessageLoopEvent::OnTimeout, base::Unretained(this)));
+
+ message_loop_->Run();
+ EXPECT_TRUE(signaled_);
+ EXPECT_EQ(expected, status_);
+}
+
+void WaitableMessageLoopEvent::OnCallback(PipelineStatus status) {
+ DCHECK_EQ(message_loop_, base::MessageLoop::current());
+ signaled_ = true;
+ status_ = status;
+ message_loop_->QuitWhenIdle();
+}
+
+void WaitableMessageLoopEvent::OnTimeout() {
+ DCHECK_EQ(message_loop_, base::MessageLoop::current());
+ ADD_FAILURE() << "Timed out waiting for message loop to quit";
+ message_loop_->QuitWhenIdle();
+}
+
+static VideoDecoderConfig GetTestConfig(VideoCodec codec,
+ gfx::Size coded_size,
+ bool is_encrypted) {
+ gfx::Rect visible_rect(coded_size.width(), coded_size.height());
+ gfx::Size natural_size = coded_size;
+
+ return VideoDecoderConfig(codec, VIDEO_CODEC_PROFILE_UNKNOWN,
+ VideoFrame::YV12, coded_size, visible_rect, natural_size,
+ NULL, 0, is_encrypted);
+}
+
+static const gfx::Size kNormalSize(320, 240);
+static const gfx::Size kLargeSize(640, 480);
+
+VideoDecoderConfig TestVideoConfig::Invalid() {
+ return GetTestConfig(kUnknownVideoCodec, kNormalSize, false);
+}
+
+VideoDecoderConfig TestVideoConfig::Normal() {
+ return GetTestConfig(kCodecVP8, kNormalSize, false);
+}
+
+VideoDecoderConfig TestVideoConfig::NormalEncrypted() {
+ return GetTestConfig(kCodecVP8, kNormalSize, true);
+}
+
+VideoDecoderConfig TestVideoConfig::Large() {
+ return GetTestConfig(kCodecVP8, kLargeSize, false);
+}
+
+VideoDecoderConfig TestVideoConfig::LargeEncrypted() {
+ return GetTestConfig(kCodecVP8, kLargeSize, true);
+}
+
+gfx::Size TestVideoConfig::NormalCodedSize() {
+ return kNormalSize;
+}
+
+gfx::Size TestVideoConfig::LargeCodedSize() {
+ return kLargeSize;
+}
+
+template <class T>
+scoped_refptr<AudioBuffer> MakeInterleavedAudioBuffer(
+ SampleFormat format,
+ int channels,
+ T start,
+ T increment,
+ int frames,
+ base::TimeDelta start_time,
+ base::TimeDelta duration) {
+ DCHECK(format == kSampleFormatU8 || format == kSampleFormatS16 ||
+ format == kSampleFormatS32 || format == kSampleFormatF32);
+
+ // Create a block of memory with values:
+ // start
+ // start + increment
+ // start + 2 * increment, ...
+ // Since this is interleaved data, channel 0 data will be:
+ // start
+ // start + channels * increment
+ // start + 2 * channels * increment, ...
+ int buffer_size = frames * channels * sizeof(T);
+ scoped_ptr<uint8[]> memory(new uint8[buffer_size]);
+ uint8* data[] = { memory.get() };
+ T* buffer = reinterpret_cast<T*>(memory.get());
+ for (int i = 0; i < frames * channels; ++i) {
+ buffer[i] = start;
+ start += increment;
+ }
+ return AudioBuffer::CopyFrom(
+ format, channels, frames, data, start_time, duration);
+}
+
+template <class T>
+scoped_refptr<AudioBuffer> MakePlanarAudioBuffer(
+ SampleFormat format,
+ int channels,
+ T start,
+ T increment,
+ int frames,
+ base::TimeDelta start_time,
+ base::TimeDelta duration) {
+ DCHECK(format == kSampleFormatPlanarF32 || format == kSampleFormatPlanarS16);
+
+ // Create multiple blocks of data, one for each channel.
+ // Values in channel 0 will be:
+ // start
+ // start + increment
+ // start + 2 * increment, ...
+ // Values in channel 1 will be:
+ // start + frames * increment
+ // start + (frames + 1) * increment
+ // start + (frames + 2) * increment, ...
+ int buffer_size = frames * sizeof(T);
+ scoped_ptr<uint8*[]> data(new uint8*[channels]);
+ scoped_ptr<uint8[]> memory(new uint8[channels * buffer_size]);
+ for (int i = 0; i < channels; ++i) {
+ data.get()[i] = memory.get() + i * buffer_size;
+ T* buffer = reinterpret_cast<T*>(data.get()[i]);
+ for (int j = 0; j < frames; ++j) {
+ buffer[j] = start;
+ start += increment;
+ }
+ }
+ return AudioBuffer::CopyFrom(
+ format, channels, frames, data.get(), start_time, duration);
+}
+
+// Instantiate all the types of MakeInterleavedAudioBuffer() and
+// MakePlanarAudioBuffer() needed.
+
+#define DEFINE_INTERLEAVED_INSTANCE(type) \
+ template scoped_refptr<AudioBuffer> MakeInterleavedAudioBuffer<type>( \
+ SampleFormat format, \
+ int channels, \
+ type start, \
+ type increment, \
+ int frames, \
+ base::TimeDelta start_time, \
+ base::TimeDelta duration)
+DEFINE_INTERLEAVED_INSTANCE(uint8);
+DEFINE_INTERLEAVED_INSTANCE(int16);
+DEFINE_INTERLEAVED_INSTANCE(int32);
+DEFINE_INTERLEAVED_INSTANCE(float);
+
+#define DEFINE_PLANAR_INSTANCE(type) \
+ template scoped_refptr<AudioBuffer> MakePlanarAudioBuffer<type>( \
+ SampleFormat format, \
+ int channels, \
+ type start, \
+ type increment, \
+ int frames, \
+ base::TimeDelta start_time, \
+      base::TimeDelta duration)
+DEFINE_PLANAR_INSTANCE(int16);
+DEFINE_PLANAR_INSTANCE(float);
+
+static const char kFakeVideoBufferHeader[] = "FakeVideoBufferForTest";
+
+scoped_refptr<DecoderBuffer> CreateFakeVideoBufferForTest(
+ const VideoDecoderConfig& config,
+ base::TimeDelta timestamp, base::TimeDelta duration) {
+ Pickle pickle;
+ pickle.WriteString(kFakeVideoBufferHeader);
+ pickle.WriteInt(config.coded_size().width());
+ pickle.WriteInt(config.coded_size().height());
+ pickle.WriteInt64(timestamp.InMilliseconds());
+
+ scoped_refptr<DecoderBuffer> buffer = DecoderBuffer::CopyFrom(
+ static_cast<const uint8*>(pickle.data()),
+ static_cast<int>(pickle.size()));
+ buffer->set_timestamp(timestamp);
+ buffer->set_duration(duration);
+
+ return buffer;
+}
+
+bool VerifyFakeVideoBufferForTest(
+ const scoped_refptr<DecoderBuffer>& buffer,
+ const VideoDecoderConfig& config) {
+ // Check if the input |buffer| matches the |config|.
+ PickleIterator pickle(Pickle(reinterpret_cast<const char*>(buffer->data()),
+ buffer->data_size()));
+ std::string header;
+ int width = 0;
+ int height = 0;
+ bool success = pickle.ReadString(&header) && pickle.ReadInt(&width) &&
+ pickle.ReadInt(&height);
+ return (success && header == kFakeVideoBufferHeader &&
+ width == config.coded_size().width() &&
+ height == config.coded_size().height());
+}
+
+} // namespace media
diff --git a/chromium/media/base/test_helpers.h b/chromium/media/base/test_helpers.h
new file mode 100644
index 00000000000..872d08d6f8d
--- /dev/null
+++ b/chromium/media/base/test_helpers.h
@@ -0,0 +1,148 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_TEST_HELPERS_H_
+#define MEDIA_BASE_TEST_HELPERS_H_
+
+#include "base/basictypes.h"
+#include "base/callback.h"
+#include "media/base/pipeline_status.h"
+#include "media/base/sample_format.h"
+#include "media/base/video_decoder_config.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "ui/gfx/size.h"
+
+namespace base {
+class MessageLoop;
+class TimeDelta;
+}
+
+namespace media {
+
+class AudioBuffer;
+class DecoderBuffer;
+
+// Return callbacks that expect to be run exactly once.
+base::Closure NewExpectedClosure();
+PipelineStatusCB NewExpectedStatusCB(PipelineStatus status);
+
+// Helper class for running a message loop until a callback has run. Useful for
+// testing classes that run on more than a single thread.
+//
+// Events are intended for single use and cannot be reset.
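+//
+// A usage sketch (the Seek() call is illustrative, not part of this class):
+//
+//   WaitableMessageLoopEvent event;
+//   pipeline->Seek(seek_time, event.GetPipelineStatusCB());
+//   event.RunAndWaitForStatus(PIPELINE_OK);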
+class WaitableMessageLoopEvent {
+ public:
+ WaitableMessageLoopEvent();
+ ~WaitableMessageLoopEvent();
+
+ // Returns a thread-safe closure that will signal |this| when executed.
+ base::Closure GetClosure();
+ PipelineStatusCB GetPipelineStatusCB();
+
+ // Runs the current message loop until |this| has been signaled.
+ //
+ // Fails the test if the timeout is reached.
+ void RunAndWait();
+
+ // Runs the current message loop until |this| has been signaled and asserts
+ // that the |expected| status was received.
+ //
+ // Fails the test if the timeout is reached.
+ void RunAndWaitForStatus(PipelineStatus expected);
+
+ private:
+ void OnCallback(PipelineStatus status);
+ void OnTimeout();
+
+ base::MessageLoop* message_loop_;
+ bool signaled_;
+ PipelineStatus status_;
+
+ DISALLOW_COPY_AND_ASSIGN(WaitableMessageLoopEvent);
+};
+
+// Provides pre-canned VideoDecoderConfig. These types are used for tests that
+// don't care about detailed parameters of the config.
+class TestVideoConfig {
+ public:
+ // Returns a configuration that is invalid.
+ static VideoDecoderConfig Invalid();
+
+ static VideoDecoderConfig Normal();
+ static VideoDecoderConfig NormalEncrypted();
+
+ // Returns a configuration that is larger in dimensions than Normal().
+ static VideoDecoderConfig Large();
+ static VideoDecoderConfig LargeEncrypted();
+
+ // Returns coded size for Normal and Large config.
+ static gfx::Size NormalCodedSize();
+ static gfx::Size LargeCodedSize();
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(TestVideoConfig);
+};
+
+// Create an AudioBuffer containing |frames| frames of data, where each sample
+// is of type T. Each frame will have the data from |channels| channels
+// interleaved. |start| and |increment| are used to specify the values for the
+// samples. Since this is interleaved data, channel 0 data will be:
+// |start|
+// |start| + |channels| * |increment|
+// |start| + 2 * |channels| * |increment|, and so on.
+// Data for subsequent channels is similar. No check is done that |format|
+// requires data to be of type T, but it is verified that |format| is an
+// interleaved format.
+//
+// |start_time| will be used as the start time for the samples. |duration| is
+// the duration.
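+//
+// For example (illustrative values): with T = int16, |channels| = 2,
+// |start| = 0, |increment| = 1 and |frames| = 4, the interleaved frames are
+// {0, 1}, {2, 3}, {4, 5}, {6, 7}, so channel 0 holds 0, 2, 4, 6.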
+template <class T>
+scoped_refptr<AudioBuffer> MakeInterleavedAudioBuffer(
+ SampleFormat format,
+ int channels,
+ T start,
+ T increment,
+ int frames,
+ base::TimeDelta start_time,
+ base::TimeDelta duration);
+
+// Create an AudioBuffer containing |frames| frames of data, where each sample
+// is of type T. Since this is planar data, there will be a block for each of
+// |channel| channels. |start| and |increment| are used to specify the values
+// for the samples, which are created in channel order. Since this is planar
+// data, channel 0 data will be:
+// |start|
+// |start| + |increment|
+// |start| + 2 * |increment|, and so on.
+// Data for channel 1 will follow where channel 0 ends. Subsequent channels are
+// similar. No check is done that |format| requires data to be of type T, but it
+// is verified that |format| is a planar format.
+//
+// |start_time| will be used as the start time for the samples. |duration| is
+// the duration.
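+//
+// For example (illustrative values): with T = int16, |channels| = 2,
+// |start| = 0, |increment| = 1 and |frames| = 4, channel 0 holds 0, 1, 2, 3
+// and channel 1 holds 4, 5, 6, 7.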
+template <class T>
+scoped_refptr<AudioBuffer> MakePlanarAudioBuffer(
+ SampleFormat format,
+ int channels,
+ T start,
+ T increment,
+ int frames,
+ base::TimeDelta start_time,
+ base::TimeDelta duration);
+
+// Creates a fake video DecoderBuffer for testing purposes. The buffer embeds
+// part of the video decoder config info so that the testing code can do a
+// sanity check.
+scoped_refptr<DecoderBuffer> CreateFakeVideoBufferForTest(
+ const VideoDecoderConfig& config,
+ base::TimeDelta timestamp,
+ base::TimeDelta duration);
+
+// Verifies that a fake video DecoderBuffer matches the given |config|.
+bool VerifyFakeVideoBufferForTest(const scoped_refptr<DecoderBuffer>& buffer,
+ const VideoDecoderConfig& config);
+
+} // namespace media
+
+#endif // MEDIA_BASE_TEST_HELPERS_H_
diff --git a/chromium/media/base/text_track.h b/chromium/media/base/text_track.h
new file mode 100644
index 00000000000..01a2ed727f9
--- /dev/null
+++ b/chromium/media/base/text_track.h
@@ -0,0 +1,42 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_TEXT_TRACK_H_
+#define MEDIA_BASE_TEXT_TRACK_H_
+
+#include <string>
+
+#include "base/callback.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/time/time.h"
+
+namespace media {
+
+// Specifies the varieties of text tracks.
+enum TextKind {
+ kTextSubtitles,
+ kTextCaptions,
+ kTextDescriptions,
+ kTextMetadata,
+ kTextNone
+};
+
+class TextTrack {
+ public:
+ virtual ~TextTrack() {}
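+
+  // An illustrative call, adding a cue spanning 1s..3s with empty settings:
+  //   track->addWebVTTCue(base::TimeDelta::FromSeconds(1),
+  //                       base::TimeDelta::FromSeconds(3),
+  //                       "1", "Hello, world", "");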
+ virtual void addWebVTTCue(const base::TimeDelta& start,
+ const base::TimeDelta& end,
+ const std::string& id,
+ const std::string& content,
+ const std::string& settings) = 0;
+};
+
+typedef base::Callback<scoped_ptr<TextTrack>
+ (TextKind kind,
+ const std::string& label,
+ const std::string& language)> AddTextTrackCB;
+
+} // namespace media
+
+#endif // MEDIA_BASE_TEXT_TRACK_H_
diff --git a/chromium/media/base/vector_math.cc b/chromium/media/base/vector_math.cc
new file mode 100644
index 00000000000..ac6de92ad8d
--- /dev/null
+++ b/chromium/media/base/vector_math.cc
@@ -0,0 +1,110 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/vector_math.h"
+#include "media/base/vector_math_testing.h"
+
+#include "base/cpu.h"
+#include "base/logging.h"
+#include "build/build_config.h"
+
+#if defined(ARCH_CPU_ARM_FAMILY) && defined(USE_NEON)
+#include <arm_neon.h>
+#endif
+
+namespace media {
+namespace vector_math {
+
+// If we know the minimum architecture at compile time, avoid CPU detection.
+// Force NaCl code to use C routines since (at present) nothing there uses these
+// methods and plumbing in the -msse-built library is non-trivial. iOS lies
+// about its architecture, so we also need to exclude it here.
+#if defined(ARCH_CPU_X86_FAMILY) && !defined(OS_NACL) && !defined(OS_IOS)
+#if defined(__SSE__)
+#define FMAC_FUNC FMAC_SSE
+#define FMUL_FUNC FMUL_SSE
+void Initialize() {}
+#else
+// X86 CPU detection required. Functions will be set by Initialize().
+// TODO(dalecurtis): Once Chrome moves to an SSE baseline this can be removed.
+#define FMAC_FUNC g_fmac_proc_
+#define FMUL_FUNC g_fmul_proc_
+
+typedef void (*MathProc)(const float src[], float scale, int len, float dest[]);
+static MathProc g_fmac_proc_ = NULL;
+static MathProc g_fmul_proc_ = NULL;
+
+void Initialize() {
+ CHECK(!g_fmac_proc_);
+ CHECK(!g_fmul_proc_);
+ const bool kUseSSE = base::CPU().has_sse();
+ g_fmac_proc_ = kUseSSE ? FMAC_SSE : FMAC_C;
+ g_fmul_proc_ = kUseSSE ? FMUL_SSE : FMUL_C;
+}
+#endif
+#elif defined(ARCH_CPU_ARM_FAMILY) && defined(USE_NEON)
+#define FMAC_FUNC FMAC_NEON
+#define FMUL_FUNC FMUL_NEON
+void Initialize() {}
+#else
+// Unknown architecture.
+#define FMAC_FUNC FMAC_C
+#define FMUL_FUNC FMUL_C
+void Initialize() {}
+#endif
+
+void FMAC(const float src[], float scale, int len, float dest[]) {
+ // Ensure |src| and |dest| are 16-byte aligned.
+ DCHECK_EQ(0u, reinterpret_cast<uintptr_t>(src) & (kRequiredAlignment - 1));
+ DCHECK_EQ(0u, reinterpret_cast<uintptr_t>(dest) & (kRequiredAlignment - 1));
+ return FMAC_FUNC(src, scale, len, dest);
+}
+
+void FMAC_C(const float src[], float scale, int len, float dest[]) {
+ for (int i = 0; i < len; ++i)
+ dest[i] += src[i] * scale;
+}
+
+void FMUL(const float src[], float scale, int len, float dest[]) {
+ // Ensure |src| and |dest| are 16-byte aligned.
+ DCHECK_EQ(0u, reinterpret_cast<uintptr_t>(src) & (kRequiredAlignment - 1));
+ DCHECK_EQ(0u, reinterpret_cast<uintptr_t>(dest) & (kRequiredAlignment - 1));
+ return FMUL_FUNC(src, scale, len, dest);
+}
+
+void FMUL_C(const float src[], float scale, int len, float dest[]) {
+ for (int i = 0; i < len; ++i)
+ dest[i] = src[i] * scale;
+}
+
+#if defined(ARCH_CPU_ARM_FAMILY) && defined(USE_NEON)
+void FMAC_NEON(const float src[], float scale, int len, float dest[]) {
+ const int rem = len % 4;
+ const int last_index = len - rem;
+ float32x4_t m_scale = vmovq_n_f32(scale);
+ for (int i = 0; i < last_index; i += 4) {
+ vst1q_f32(dest + i, vmlaq_f32(
+ vld1q_f32(dest + i), vld1q_f32(src + i), m_scale));
+ }
+
+ // Handle any remaining values that wouldn't fit in an NEON pass.
+ for (int i = last_index; i < len; ++i)
+ dest[i] += src[i] * scale;
+}
+
+void FMUL_NEON(const float src[], float scale, int len, float dest[]) {
+ const int rem = len % 4;
+ const int last_index = len - rem;
+ float32x4_t m_scale = vmovq_n_f32(scale);
+ for (int i = 0; i < last_index; i += 4)
+ vst1q_f32(dest + i, vmulq_f32(vld1q_f32(src + i), m_scale));
+
+ // Handle any remaining values that wouldn't fit in an NEON pass.
+ for (int i = last_index; i < len; ++i)
+ dest[i] = src[i] * scale;
+}
+#endif
+
+} // namespace vector_math
+} // namespace media
diff --git a/chromium/media/base/vector_math.h b/chromium/media/base/vector_math.h
new file mode 100644
index 00000000000..4764f0b7e3e
--- /dev/null
+++ b/chromium/media/base/vector_math.h
@@ -0,0 +1,32 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_VECTOR_MATH_H_
+#define MEDIA_BASE_VECTOR_MATH_H_
+
+#include "media/base/media_export.h"
+
+namespace media {
+namespace vector_math {
+
+// Required alignment for inputs and outputs to all vector math functions.
+enum { kRequiredAlignment = 16 };
+
+// Selects runtime-specific optimizations such as SSE. Must be called prior to
+// calling FMAC() or FMUL(). Called during media library initialization; most
+// users should never have to call this.
+MEDIA_EXPORT void Initialize();
+
+// Multiply each element of |src| (up to |len|) by |scale| and add to |dest|.
+// |src| and |dest| must be aligned by kRequiredAlignment.
+MEDIA_EXPORT void FMAC(const float src[], float scale, int len, float dest[]);
+
+// Multiply each element of |src| by |scale| and store in |dest|. |src| and
+// |dest| must be aligned by kRequiredAlignment.
+MEDIA_EXPORT void FMUL(const float src[], float scale, int len, float dest[]);
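+
+// A usage sketch (illustrative): buffers must satisfy kRequiredAlignment,
+// e.g. via base::AlignedAlloc() from base/memory/aligned_memory.h.
+//
+//   float* src = static_cast<float*>(
+//       base::AlignedAlloc(len * sizeof(float), kRequiredAlignment));
+//   float* dest = static_cast<float*>(
+//       base::AlignedAlloc(len * sizeof(float), kRequiredAlignment));
+//   ... fill |src| and |dest| ...
+//   FMUL(src, 0.5f, len, dest);  // dest[i] = src[i] * 0.5f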
+
+} // namespace vector_math
+} // namespace media
+
+#endif // MEDIA_BASE_VECTOR_MATH_H_
diff --git a/chromium/media/base/vector_math_testing.h b/chromium/media/base/vector_math_testing.h
new file mode 100644
index 00000000000..02d14f807c5
--- /dev/null
+++ b/chromium/media/base/vector_math_testing.h
@@ -0,0 +1,35 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_VECTOR_MATH_TESTING_H_
+#define MEDIA_BASE_VECTOR_MATH_TESTING_H_
+
+#include "build/build_config.h"
+#include "media/base/media_export.h"
+
+namespace media {
+namespace vector_math {
+
+// Optimized versions exposed for testing. See vector_math.h for details.
+MEDIA_EXPORT void FMAC_C(const float src[], float scale, int len, float dest[]);
+MEDIA_EXPORT void FMUL_C(const float src[], float scale, int len, float dest[]);
+
+#if defined(ARCH_CPU_X86_FAMILY)
+MEDIA_EXPORT void FMAC_SSE(const float src[], float scale, int len,
+ float dest[]);
+MEDIA_EXPORT void FMUL_SSE(const float src[], float scale, int len,
+ float dest[]);
+#endif
+
+#if defined(ARCH_CPU_ARM_FAMILY) && defined(USE_NEON)
+MEDIA_EXPORT void FMAC_NEON(const float src[], float scale, int len,
+ float dest[]);
+MEDIA_EXPORT void FMUL_NEON(const float src[], float scale, int len,
+ float dest[]);
+#endif
+
+} // namespace vector_math
+} // namespace media
+
+#endif // MEDIA_BASE_VECTOR_MATH_TESTING_H_
diff --git a/chromium/media/base/vector_math_unittest.cc b/chromium/media/base/vector_math_unittest.cc
new file mode 100644
index 00000000000..2c7740142cb
--- /dev/null
+++ b/chromium/media/base/vector_math_unittest.cc
@@ -0,0 +1,291 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// MSVC++ requires this to be set before any other includes to get M_PI.
+#define _USE_MATH_DEFINES
+#include <cmath>
+
+#include "base/command_line.h"
+#include "base/cpu.h"
+#include "base/memory/aligned_memory.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/stringize_macros.h"
+#include "base/time/time.h"
+#include "media/base/vector_math.h"
+#include "media/base/vector_math_testing.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using base::TimeTicks;
+using std::fill;
+
+// Command line switch for runtime adjustment of benchmark iterations.
+static const char kBenchmarkIterations[] = "vector-math-iterations";
+static const int kDefaultIterations = 10;
+
+// Default test values.
+static const float kScale = 0.5;
+static const float kInputFillValue = 1.0;
+static const float kOutputFillValue = 3.0;
+
+namespace media {
+
+class VectorMathTest : public testing::Test {
+ public:
+ static const int kVectorSize = 8192;
+
+ VectorMathTest() {
+ // Initialize input and output vectors.
+ input_vector.reset(static_cast<float*>(base::AlignedAlloc(
+ sizeof(float) * kVectorSize, vector_math::kRequiredAlignment)));
+ output_vector.reset(static_cast<float*>(base::AlignedAlloc(
+ sizeof(float) * kVectorSize, vector_math::kRequiredAlignment)));
+ }
+
+ void FillTestVectors(float input, float output) {
+ // Setup input and output vectors.
+ fill(input_vector.get(), input_vector.get() + kVectorSize, input);
+ fill(output_vector.get(), output_vector.get() + kVectorSize, output);
+ }
+
+ void VerifyOutput(float value) {
+ for (int i = 0; i < kVectorSize; ++i)
+ ASSERT_FLOAT_EQ(output_vector.get()[i], value);
+ }
+
+ int BenchmarkIterations() {
+ int vector_math_iterations = kDefaultIterations;
+ std::string iterations(
+ CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
+ kBenchmarkIterations));
+ if (!iterations.empty())
+ base::StringToInt(iterations, &vector_math_iterations);
+ return vector_math_iterations;
+ }
+
+ protected:
+ scoped_ptr_malloc<float, base::ScopedPtrAlignedFree> input_vector;
+ scoped_ptr_malloc<float, base::ScopedPtrAlignedFree> output_vector;
+
+ DISALLOW_COPY_AND_ASSIGN(VectorMathTest);
+};
+
+// Ensure each optimized vector_math::FMAC() method returns the same value.
+TEST_F(VectorMathTest, FMAC) {
+ static const float kResult = kInputFillValue * kScale + kOutputFillValue;
+
+ {
+ SCOPED_TRACE("FMAC");
+ FillTestVectors(kInputFillValue, kOutputFillValue);
+ vector_math::FMAC(
+ input_vector.get(), kScale, kVectorSize, output_vector.get());
+ VerifyOutput(kResult);
+ }
+
+ {
+ SCOPED_TRACE("FMAC_C");
+ FillTestVectors(kInputFillValue, kOutputFillValue);
+ vector_math::FMAC_C(
+ input_vector.get(), kScale, kVectorSize, output_vector.get());
+ VerifyOutput(kResult);
+ }
+
+#if defined(ARCH_CPU_X86_FAMILY)
+ {
+ ASSERT_TRUE(base::CPU().has_sse());
+ SCOPED_TRACE("FMAC_SSE");
+ FillTestVectors(kInputFillValue, kOutputFillValue);
+ vector_math::FMAC_SSE(
+ input_vector.get(), kScale, kVectorSize, output_vector.get());
+ VerifyOutput(kResult);
+ }
+#endif
+
+#if defined(ARCH_CPU_ARM_FAMILY) && defined(USE_NEON)
+ {
+ SCOPED_TRACE("FMAC_NEON");
+ FillTestVectors(kInputFillValue, kOutputFillValue);
+ vector_math::FMAC_NEON(
+ input_vector.get(), kScale, kVectorSize, output_vector.get());
+ VerifyOutput(kResult);
+ }
+#endif
+}
+
+// Ensure each optimized vector_math::FMUL() method returns the same value.
+TEST_F(VectorMathTest, FMUL) {
+ static const float kResult = kInputFillValue * kScale;
+
+ {
+ SCOPED_TRACE("FMUL");
+ FillTestVectors(kInputFillValue, kOutputFillValue);
+ vector_math::FMUL(
+ input_vector.get(), kScale, kVectorSize, output_vector.get());
+ VerifyOutput(kResult);
+ }
+
+ {
+ SCOPED_TRACE("FMUL_C");
+ FillTestVectors(kInputFillValue, kOutputFillValue);
+ vector_math::FMUL_C(
+ input_vector.get(), kScale, kVectorSize, output_vector.get());
+ VerifyOutput(kResult);
+ }
+
+#if defined(ARCH_CPU_X86_FAMILY)
+ {
+ ASSERT_TRUE(base::CPU().has_sse());
+ SCOPED_TRACE("FMUL_SSE");
+ FillTestVectors(kInputFillValue, kOutputFillValue);
+ vector_math::FMUL_SSE(
+ input_vector.get(), kScale, kVectorSize, output_vector.get());
+ VerifyOutput(kResult);
+ }
+#endif
+
+#if defined(ARCH_CPU_ARM_FAMILY) && defined(USE_NEON)
+ {
+ SCOPED_TRACE("FMUL_NEON");
+ FillTestVectors(kInputFillValue, kOutputFillValue);
+ vector_math::FMUL_NEON(
+ input_vector.get(), kScale, kVectorSize, output_vector.get());
+ VerifyOutput(kResult);
+ }
+#endif
+}
+
+// Define platform independent function name for FMACBenchmark* tests.
+#if defined(ARCH_CPU_X86_FAMILY)
+#define FMAC_FUNC FMAC_SSE
+#elif defined(ARCH_CPU_ARM_FAMILY) && defined(USE_NEON)
+#define FMAC_FUNC FMAC_NEON
+#endif
+
+// Benchmark for each optimized vector_math::FMAC() method. Original benchmarks
+// were run with --vector-math-iterations=200000.
+TEST_F(VectorMathTest, FMACBenchmark) {
+ static const int kBenchmarkIterations = BenchmarkIterations();
+
+ printf("Benchmarking %d iterations:\n", kBenchmarkIterations);
+
+ // Benchmark FMAC_C().
+ FillTestVectors(kInputFillValue, kOutputFillValue);
+ TimeTicks start = TimeTicks::HighResNow();
+ for (int i = 0; i < kBenchmarkIterations; ++i) {
+ vector_math::FMAC_C(
+ input_vector.get(), kScale, kVectorSize, output_vector.get());
+ }
+ double total_time_c_ms = (TimeTicks::HighResNow() - start).InMillisecondsF();
+ printf("FMAC_C took %.2fms.\n", total_time_c_ms);
+
+#if defined(FMAC_FUNC)
+#if defined(ARCH_CPU_X86_FAMILY)
+ ASSERT_TRUE(base::CPU().has_sse());
+#endif
+
+ // Benchmark FMAC_FUNC() with unaligned size.
+ ASSERT_NE((kVectorSize - 1) % (vector_math::kRequiredAlignment /
+ sizeof(float)), 0U);
+ FillTestVectors(kInputFillValue, kOutputFillValue);
+ start = TimeTicks::HighResNow();
+ for (int j = 0; j < kBenchmarkIterations; ++j) {
+ vector_math::FMAC_FUNC(
+ input_vector.get(), kScale, kVectorSize - 1, output_vector.get());
+ }
+ double total_time_optimized_unaligned_ms =
+ (TimeTicks::HighResNow() - start).InMillisecondsF();
+ printf(STRINGIZE(FMAC_FUNC) " (unaligned size) took %.2fms; which is %.2fx "
+ "faster than FMAC_C.\n", total_time_optimized_unaligned_ms,
+ total_time_c_ms / total_time_optimized_unaligned_ms);
+
+ // Benchmark FMAC_FUNC() with aligned size.
+ ASSERT_EQ(kVectorSize % (vector_math::kRequiredAlignment / sizeof(float)),
+ 0U);
+ FillTestVectors(kInputFillValue, kOutputFillValue);
+ start = TimeTicks::HighResNow();
+ for (int j = 0; j < kBenchmarkIterations; ++j) {
+ vector_math::FMAC_FUNC(
+ input_vector.get(), kScale, kVectorSize, output_vector.get());
+ }
+ double total_time_optimized_aligned_ms =
+ (TimeTicks::HighResNow() - start).InMillisecondsF();
+ printf(STRINGIZE(FMAC_FUNC) " (aligned) took %.2fms; which is %.2fx "
+ "faster than FMAC_C and %.2fx faster than "
+ STRINGIZE(FMAC_FUNC) " (unaligned).\n",
+ total_time_optimized_aligned_ms,
+ total_time_c_ms / total_time_optimized_aligned_ms,
+ total_time_optimized_unaligned_ms / total_time_optimized_aligned_ms);
+#endif
+}
+
+#undef FMAC_FUNC
+
+// Define platform independent function name for FMULBenchmark* tests.
+#if defined(ARCH_CPU_X86_FAMILY)
+#define FMUL_FUNC FMUL_SSE
+#elif defined(ARCH_CPU_ARM_FAMILY) && defined(USE_NEON)
+#define FMUL_FUNC FMUL_NEON
+#endif
+
+// Benchmark for each optimized vector_math::FMUL() method. Original benchmarks
+// were run with --vector-math-iterations=200000.
+TEST_F(VectorMathTest, FMULBenchmark) {
+ static const int kBenchmarkIterations = BenchmarkIterations();
+
+ printf("Benchmarking %d iterations:\n", kBenchmarkIterations);
+
+ // Benchmark FMUL_C().
+ FillTestVectors(kInputFillValue, kOutputFillValue);
+ TimeTicks start = TimeTicks::HighResNow();
+ for (int i = 0; i < kBenchmarkIterations; ++i) {
+ vector_math::FMUL_C(
+ input_vector.get(), kScale, kVectorSize, output_vector.get());
+ }
+ double total_time_c_ms = (TimeTicks::HighResNow() - start).InMillisecondsF();
+ printf("FMUL_C took %.2fms.\n", total_time_c_ms);
+
+#if defined(FMUL_FUNC)
+#if defined(ARCH_CPU_X86_FAMILY)
+ ASSERT_TRUE(base::CPU().has_sse());
+#endif
+
+  // Benchmark FMUL_FUNC() with unaligned size.
+ ASSERT_NE((kVectorSize - 1) % (vector_math::kRequiredAlignment /
+ sizeof(float)), 0U);
+ FillTestVectors(kInputFillValue, kOutputFillValue);
+ start = TimeTicks::HighResNow();
+ for (int j = 0; j < kBenchmarkIterations; ++j) {
+ vector_math::FMUL_FUNC(
+ input_vector.get(), kScale, kVectorSize - 1, output_vector.get());
+ }
+ double total_time_optimized_unaligned_ms =
+ (TimeTicks::HighResNow() - start).InMillisecondsF();
+ printf(STRINGIZE(FMUL_FUNC) " (unaligned size) took %.2fms; which is %.2fx "
+ "faster than FMUL_C.\n", total_time_optimized_unaligned_ms,
+ total_time_c_ms / total_time_optimized_unaligned_ms);
+
+  // Benchmark FMUL_FUNC() with aligned size.
+ ASSERT_EQ(kVectorSize % (vector_math::kRequiredAlignment / sizeof(float)),
+ 0U);
+ FillTestVectors(kInputFillValue, kOutputFillValue);
+ start = TimeTicks::HighResNow();
+ for (int j = 0; j < kBenchmarkIterations; ++j) {
+ vector_math::FMUL_FUNC(
+ input_vector.get(), kScale, kVectorSize, output_vector.get());
+ }
+ double total_time_optimized_aligned_ms =
+ (TimeTicks::HighResNow() - start).InMillisecondsF();
+ printf(STRINGIZE(FMUL_FUNC) " (aligned) took %.2fms; which is %.2fx "
+ "faster than FMUL_C and %.2fx faster than "
+ STRINGIZE(FMUL_FUNC) " (unaligned).\n",
+ total_time_optimized_aligned_ms,
+ total_time_c_ms / total_time_optimized_aligned_ms,
+ total_time_optimized_unaligned_ms / total_time_optimized_aligned_ms);
+#endif
+}
+
+#undef FMUL_FUNC
+
+} // namespace media
diff --git a/chromium/media/base/video_decoder.cc b/chromium/media/base/video_decoder.cc
new file mode 100644
index 00000000000..81397b7b676
--- /dev/null
+++ b/chromium/media/base/video_decoder.cc
@@ -0,0 +1,25 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/video_decoder.h"
+
+namespace media {
+
+VideoDecoder::VideoDecoder() {}
+
+VideoDecoder::~VideoDecoder() {}
+
+bool VideoDecoder::HasAlpha() const {
+ return false;
+}
+
+bool VideoDecoder::NeedsBitstreamConversion() const {
+ return false;
+}
+
+bool VideoDecoder::CanReadWithoutStalling() const {
+ return true;
+}
+
+} // namespace media
diff --git a/chromium/media/base/video_decoder.h b/chromium/media/base/video_decoder.h
new file mode 100644
index 00000000000..63f63e5d7d6
--- /dev/null
+++ b/chromium/media/base/video_decoder.h
@@ -0,0 +1,96 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_VIDEO_DECODER_H_
+#define MEDIA_BASE_VIDEO_DECODER_H_
+
+#include "base/callback.h"
+#include "base/memory/ref_counted.h"
+#include "media/base/media_export.h"
+#include "media/base/pipeline_status.h"
+#include "ui/gfx/size.h"
+
+namespace media {
+
+class DecoderBuffer;
+class VideoDecoderConfig;
+class VideoFrame;
+
+class MEDIA_EXPORT VideoDecoder {
+ public:
+ // Status codes for decode operations on VideoDecoder.
+ enum Status {
+ kOk, // Everything went as planned.
+ kNotEnoughData, // Not enough data to produce a video frame.
+    kDecodeError,  // A decoding error occurred.
+    kDecryptError  // A decryption error occurred.
+ };
+
+ VideoDecoder();
+ virtual ~VideoDecoder();
+
+ // Initializes a VideoDecoder with the given |config|, executing the
+ // |status_cb| upon completion.
+ //
+ // Note:
+ // 1) The VideoDecoder will be reinitialized if it was initialized before.
+ // Upon reinitialization, all internal buffered frames will be dropped.
+  // 2) This method should not be called during a pending decode, reset or
+  //    stop.
+ // 3) No VideoDecoder calls except for Stop() should be made before
+ // |status_cb| is executed.
+ virtual void Initialize(const VideoDecoderConfig& config,
+ const PipelineStatusCB& status_cb) = 0;
+
+ // Requests a |buffer| to be decoded. The status of the decoder and decoded
+ // frame are returned via the provided callback. Only one decode may be in
+ // flight at any given time.
+ //
+ // Implementations guarantee that the callback will not be called from within
+ // this method.
+ //
+ // If the returned status is kOk:
+ // - Non-EOS (end of stream) frame contains decoded video data.
+ // - EOS frame indicates the end of the stream.
+ // - NULL frame indicates an aborted decode. This can happen if Reset() or
+ // Stop() is called during the decoding process.
+ // Otherwise the returned frame must be NULL.
+ typedef base::Callback<void(Status,
+ const scoped_refptr<VideoFrame>&)> DecodeCB;
+ virtual void Decode(const scoped_refptr<DecoderBuffer>& buffer,
+ const DecodeCB& decode_cb) = 0;
+
+  // Resets decoder state, fulfilling all pending DecodeCBs and dropping extra
+ // queued decoded data. After this call, the decoder is back to an initialized
+ // clean state.
+ // Note: No VideoDecoder calls should be made before |closure| is executed.
+ virtual void Reset(const base::Closure& closure) = 0;
+
+ // Stops decoder, fires any pending callbacks and sets the decoder to an
+ // uninitialized state. A VideoDecoder cannot be re-initialized after it has
+ // been stopped.
+ // Note that if Initialize() has been called, Stop() must be called and
+ // complete before deleting the decoder.
+ virtual void Stop(const base::Closure& closure) = 0;
+
+  // Returns true if the output format has an alpha channel. Most formats do
+  // not have alpha, so the default is false. Override and return true for
+  // decoders that return formats with an alpha channel.
+ virtual bool HasAlpha() const;
+
+ // Returns true if the decoder needs bitstream conversion before decoding.
+ virtual bool NeedsBitstreamConversion() const;
+
+ // Returns true if the decoder currently has the ability to decode and return
+ // a VideoFrame. Most implementations can allocate a new VideoFrame and hence
+ // this will always return true. Override and return false for decoders that
+ // use a fixed set of VideoFrames for decoding.
+ virtual bool CanReadWithoutStalling() const;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(VideoDecoder);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_VIDEO_DECODER_H_
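
To make the interface contract concrete, here is a hedged sketch of a trivial
VideoDecoder subclass; FakeVideoDecoder is hypothetical and not part of this
import. Note that Decode() posts its callback instead of running it
synchronously, as the interface comment requires:

#include "base/bind.h"
#include "base/message_loop/message_loop.h"
#include "media/base/decoder_buffer.h"
#include "media/base/video_decoder.h"
#include "media/base/video_decoder_config.h"
#include "media/base/video_frame.h"

// Hypothetical decoder that "decodes" every buffer into a black frame.
class FakeVideoDecoder : public media::VideoDecoder {
 public:
  virtual void Initialize(const media::VideoDecoderConfig& config,
                          const media::PipelineStatusCB& status_cb) OVERRIDE {
    coded_size_ = config.coded_size();
    status_cb.Run(media::PIPELINE_OK);
  }

  virtual void Decode(const scoped_refptr<media::DecoderBuffer>& buffer,
                      const DecodeCB& decode_cb) OVERRIDE {
    scoped_refptr<media::VideoFrame> frame =
        buffer->IsEndOfStream()
            ? media::VideoFrame::CreateEmptyFrame()
            : media::VideoFrame::CreateBlackFrame(coded_size_);
    // Post rather than invoke: the callback must not run from within Decode().
    base::MessageLoop::current()->PostTask(
        FROM_HERE, base::Bind(decode_cb, kOk, frame));
  }

  virtual void Reset(const base::Closure& closure) OVERRIDE { closure.Run(); }
  virtual void Stop(const base::Closure& closure) OVERRIDE { closure.Run(); }

 private:
  gfx::Size coded_size_;
};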
diff --git a/chromium/media/base/video_decoder_config.cc b/chromium/media/base/video_decoder_config.cc
new file mode 100644
index 00000000000..da914f22fb9
--- /dev/null
+++ b/chromium/media/base/video_decoder_config.cc
@@ -0,0 +1,168 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/video_decoder_config.h"
+
+#include "base/logging.h"
+#include "base/metrics/histogram.h"
+
+namespace media {
+
+VideoDecoderConfig::VideoDecoderConfig()
+ : codec_(kUnknownVideoCodec),
+ profile_(VIDEO_CODEC_PROFILE_UNKNOWN),
+ format_(VideoFrame::INVALID),
+ is_encrypted_(false) {
+}
+
+VideoDecoderConfig::VideoDecoderConfig(VideoCodec codec,
+ VideoCodecProfile profile,
+ VideoFrame::Format format,
+ const gfx::Size& coded_size,
+ const gfx::Rect& visible_rect,
+ const gfx::Size& natural_size,
+ const uint8* extra_data,
+ size_t extra_data_size,
+ bool is_encrypted) {
+ Initialize(codec, profile, format, coded_size, visible_rect, natural_size,
+ extra_data, extra_data_size, is_encrypted, true);
+}
+
+VideoDecoderConfig::~VideoDecoderConfig() {}
+
+// Some videos just want to watch the world burn, with a height of 0; cap the
+// resulting "infinite" aspect ratio.
+static const int kInfiniteRatio = 99999;
+
+// Common aspect ratios (multiplied by 100 and truncated) used for histogramming
+// video sizes. These were taken on 20111103 from
+// http://wikipedia.org/wiki/Aspect_ratio_(image)#Previous_and_currently_used_aspect_ratios
+static const int kCommonAspectRatios100[] = {
+ 100, 115, 133, 137, 143, 150, 155, 160, 166, 175, 177, 185, 200, 210, 220,
+ 221, 235, 237, 240, 255, 259, 266, 276, 293, 400, 1200, kInfiniteRatio,
+};
+
+template<class T> // T has int width() & height() methods.
+static void UmaHistogramAspectRatio(const char* name, const T& size) {
+ UMA_HISTOGRAM_CUSTOM_ENUMERATION(
+ name,
+ // Intentionally use integer division to truncate the result.
+ size.height() ? (size.width() * 100) / size.height() : kInfiniteRatio,
+ base::CustomHistogram::ArrayToCustomRanges(
+ kCommonAspectRatios100, arraysize(kCommonAspectRatios100)));
+}
+
+void VideoDecoderConfig::Initialize(VideoCodec codec,
+ VideoCodecProfile profile,
+ VideoFrame::Format format,
+ const gfx::Size& coded_size,
+ const gfx::Rect& visible_rect,
+ const gfx::Size& natural_size,
+ const uint8* extra_data,
+ size_t extra_data_size,
+ bool is_encrypted,
+ bool record_stats) {
+ CHECK((extra_data_size != 0) == (extra_data != NULL));
+
+ if (record_stats) {
+ UMA_HISTOGRAM_ENUMERATION("Media.VideoCodec", codec, kVideoCodecMax + 1);
+    // Drop UNKNOWN because UMA_HISTOGRAM_ENUMERATION() uses one bucket for
+    // all values less than 1.
+ if (profile >= 0) {
+ UMA_HISTOGRAM_ENUMERATION("Media.VideoCodecProfile", profile,
+ VIDEO_CODEC_PROFILE_MAX + 1);
+ }
+ UMA_HISTOGRAM_COUNTS_10000("Media.VideoCodedWidth", coded_size.width());
+ UmaHistogramAspectRatio("Media.VideoCodedAspectRatio", coded_size);
+ UMA_HISTOGRAM_COUNTS_10000("Media.VideoVisibleWidth", visible_rect.width());
+ UmaHistogramAspectRatio("Media.VideoVisibleAspectRatio", visible_rect);
+ }
+
+ codec_ = codec;
+ profile_ = profile;
+ format_ = format;
+ coded_size_ = coded_size;
+ visible_rect_ = visible_rect;
+ natural_size_ = natural_size;
+ extra_data_.assign(extra_data, extra_data + extra_data_size);
+ is_encrypted_ = is_encrypted;
+}
+
+bool VideoDecoderConfig::IsValidConfig() const {
+ return codec_ != kUnknownVideoCodec &&
+ natural_size_.width() > 0 &&
+ natural_size_.height() > 0 &&
+ VideoFrame::IsValidConfig(format_, coded_size_, visible_rect_,
+ natural_size_);
+}
+
+bool VideoDecoderConfig::Matches(const VideoDecoderConfig& config) const {
+ return ((codec() == config.codec()) &&
+ (format() == config.format()) &&
+ (profile() == config.profile()) &&
+ (coded_size() == config.coded_size()) &&
+ (visible_rect() == config.visible_rect()) &&
+ (natural_size() == config.natural_size()) &&
+ (extra_data_size() == config.extra_data_size()) &&
+ (!extra_data() || !memcmp(extra_data(), config.extra_data(),
+ extra_data_size())) &&
+ (is_encrypted() == config.is_encrypted()));
+}
+
+std::string VideoDecoderConfig::AsHumanReadableString() const {
+ std::ostringstream s;
+ s << "codec: " << codec()
+ << " format: " << format()
+ << " profile: " << profile()
+ << " coded size: [" << coded_size().width()
+ << "," << coded_size().height() << "]"
+ << " visible rect: [" << visible_rect().x()
+ << "," << visible_rect().y()
+ << "," << visible_rect().width()
+ << "," << visible_rect().height() << "]"
+ << " natural size: [" << natural_size().width()
+ << "," << natural_size().height() << "]"
+ << " has extra data? " << (extra_data() ? "true" : "false")
+ << " encrypted? " << (is_encrypted() ? "true" : "false");
+ return s.str();
+}
+
+VideoCodec VideoDecoderConfig::codec() const {
+ return codec_;
+}
+
+VideoCodecProfile VideoDecoderConfig::profile() const {
+ return profile_;
+}
+
+VideoFrame::Format VideoDecoderConfig::format() const {
+ return format_;
+}
+
+gfx::Size VideoDecoderConfig::coded_size() const {
+ return coded_size_;
+}
+
+gfx::Rect VideoDecoderConfig::visible_rect() const {
+ return visible_rect_;
+}
+
+gfx::Size VideoDecoderConfig::natural_size() const {
+ return natural_size_;
+}
+
+const uint8* VideoDecoderConfig::extra_data() const {
+ if (extra_data_.empty())
+ return NULL;
+ return &extra_data_[0];
+}
+
+size_t VideoDecoderConfig::extra_data_size() const {
+ return extra_data_.size();
+}
+
+bool VideoDecoderConfig::is_encrypted() const {
+ return is_encrypted_;
+}
+
+} // namespace media
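
A small, self-contained illustration of the truncating aspect-ratio bucketing
performed by UmaHistogramAspectRatio() above; the helper name is ours:

// Maps a size to its truncated aspect-ratio-times-100 histogram bucket.
int AspectRatioBucket(int width, int height) {
  const int kInfiniteRatio = 99999;
  // Integer division intentionally truncates, as in the UMA code above.
  return height ? (width * 100) / height : kInfiniteRatio;
}

// AspectRatioBucket(1280, 720) == 177   (the 16:9 entry in
//                                        kCommonAspectRatios100)
// AspectRatioBucket(640, 480)  == 133   (4:3)
// AspectRatioBucket(640, 0)    == 99999 (kInfiniteRatio)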
diff --git a/chromium/media/base/video_decoder_config.h b/chromium/media/base/video_decoder_config.h
new file mode 100644
index 00000000000..3f7db4e6ca8
--- /dev/null
+++ b/chromium/media/base/video_decoder_config.h
@@ -0,0 +1,158 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_VIDEO_DECODER_CONFIG_H_
+#define MEDIA_BASE_VIDEO_DECODER_CONFIG_H_
+
+#include <string>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "media/base/media_export.h"
+#include "media/base/video_frame.h"
+#include "ui/gfx/rect.h"
+#include "ui/gfx/size.h"
+
+namespace media {
+
+enum VideoCodec {
+ // These values are histogrammed over time; do not change their ordinal
+  // values. When deleting a codec, replace it with a dummy value; when adding
+  // a codec, do so at the bottom (and update kVideoCodecMax).
+ kUnknownVideoCodec = 0,
+ kCodecH264,
+ kCodecVC1,
+ kCodecMPEG2,
+ kCodecMPEG4,
+ kCodecTheora,
+ kCodecVP8,
+ kCodecVP9,
+ // DO NOT ADD RANDOM VIDEO CODECS!
+ //
+ // The only acceptable time to add a new codec is if there is production code
+ // that uses said codec in the same CL.
+
+ kVideoCodecMax = kCodecVP9 // Must equal the last "real" codec above.
+};
+
+// Video stream profile. This *must* match PP_VideoDecoder_Profile.
+// (enforced in webkit/plugins/ppapi/ppb_video_decoder_impl.cc)
+enum VideoCodecProfile {
+ // Keep the values in this enum unique, as they imply format (h.264 vs. VP8,
+ // for example), and keep the values for a particular format grouped
+ // together for clarity.
+ VIDEO_CODEC_PROFILE_UNKNOWN = -1,
+ H264PROFILE_MIN = 0,
+ H264PROFILE_BASELINE = H264PROFILE_MIN,
+ H264PROFILE_MAIN = 1,
+ H264PROFILE_EXTENDED = 2,
+ H264PROFILE_HIGH = 3,
+ H264PROFILE_HIGH10PROFILE = 4,
+ H264PROFILE_HIGH422PROFILE = 5,
+ H264PROFILE_HIGH444PREDICTIVEPROFILE = 6,
+ H264PROFILE_SCALABLEBASELINE = 7,
+ H264PROFILE_SCALABLEHIGH = 8,
+ H264PROFILE_STEREOHIGH = 9,
+ H264PROFILE_MULTIVIEWHIGH = 10,
+ H264PROFILE_MAX = H264PROFILE_MULTIVIEWHIGH,
+ VP8PROFILE_MIN = 11,
+ VP8PROFILE_MAIN = VP8PROFILE_MIN,
+ VP8PROFILE_MAX = VP8PROFILE_MAIN,
+ VP9PROFILE_MIN = 12,
+ VP9PROFILE_MAIN = VP9PROFILE_MIN,
+ VP9PROFILE_MAX = VP9PROFILE_MAIN,
+ VIDEO_CODEC_PROFILE_MAX = VP9PROFILE_MAX,
+};
+
+class MEDIA_EXPORT VideoDecoderConfig {
+ public:
+ // Constructs an uninitialized object. Clients should call Initialize() with
+ // appropriate values before using.
+ VideoDecoderConfig();
+
+ // Constructs an initialized object. It is acceptable to pass in NULL for
+ // |extra_data|, otherwise the memory is copied.
+ VideoDecoderConfig(VideoCodec codec,
+ VideoCodecProfile profile,
+ VideoFrame::Format format,
+ const gfx::Size& coded_size,
+ const gfx::Rect& visible_rect,
+ const gfx::Size& natural_size,
+ const uint8* extra_data, size_t extra_data_size,
+ bool is_encrypted);
+
+ ~VideoDecoderConfig();
+
+ // Resets the internal state of this object.
+ void Initialize(VideoCodec codec,
+ VideoCodecProfile profile,
+ VideoFrame::Format format,
+ const gfx::Size& coded_size,
+ const gfx::Rect& visible_rect,
+ const gfx::Size& natural_size,
+ const uint8* extra_data, size_t extra_data_size,
+ bool is_encrypted,
+ bool record_stats);
+
+ // Returns true if this object has appropriate configuration values, false
+ // otherwise.
+ bool IsValidConfig() const;
+
+ // Returns true if all fields in |config| match this config.
+  // Note: The contents of |extra_data_| are compared, not the raw pointers.
+ bool Matches(const VideoDecoderConfig& config) const;
+
+ // Returns a human-readable string describing |*this|. For debugging & test
+ // output only.
+ std::string AsHumanReadableString() const;
+
+ VideoCodec codec() const;
+ VideoCodecProfile profile() const;
+
+ // Video format used to determine YUV buffer sizes.
+ VideoFrame::Format format() const;
+
+ // Width and height of video frame immediately post-decode. Not all pixels
+ // in this region are valid.
+ gfx::Size coded_size() const;
+
+ // Region of |coded_size_| that is visible.
+ gfx::Rect visible_rect() const;
+
+ // Final visible width and height of a video frame with aspect ratio taken
+ // into account.
+ gfx::Size natural_size() const;
+
+ // Optional byte data required to initialize video decoders, such as H.264
+  // AVCC data.
+ const uint8* extra_data() const;
+ size_t extra_data_size() const;
+
+ // Whether the video stream is potentially encrypted.
+ // Note that in a potentially encrypted video stream, individual buffers
+ // can be encrypted or not encrypted.
+ bool is_encrypted() const;
+
+ private:
+ VideoCodec codec_;
+ VideoCodecProfile profile_;
+
+ VideoFrame::Format format_;
+
+ gfx::Size coded_size_;
+ gfx::Rect visible_rect_;
+ gfx::Size natural_size_;
+
+ std::vector<uint8> extra_data_;
+
+ bool is_encrypted_;
+
+  // Not using DISALLOW_COPY_AND_ASSIGN here intentionally to allow the
+  // compiler-generated copy constructor and assignment operator. Since the
+  // extra data is typically small, the performance impact is minimal.
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_VIDEO_DECODER_CONFIG_H_
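
As a usage sketch (all values below are illustrative, not taken from
production code), a VP8 configuration with no extra data can be built and
validated like so; since the class is deliberately copyable, returning it by
value is fine:

#include "base/logging.h"
#include "media/base/video_decoder_config.h"

media::VideoDecoderConfig MakeVp8Config() {
  gfx::Size coded_size(640, 480);
  media::VideoDecoderConfig config(media::kCodecVP8,
                                   media::VP8PROFILE_MAIN,
                                   media::VideoFrame::YV12,
                                   coded_size,
                                   gfx::Rect(coded_size),  // visible_rect
                                   coded_size,             // natural_size
                                   NULL, 0,                // no extra data
                                   false);                 // not encrypted
  DCHECK(config.IsValidConfig());
  return config;
}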
diff --git a/chromium/media/base/video_frame.cc b/chromium/media/base/video_frame.cc
new file mode 100644
index 00000000000..08e7e1ad7a2
--- /dev/null
+++ b/chromium/media/base/video_frame.cc
@@ -0,0 +1,461 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/video_frame.h"
+
+#include <algorithm>
+
+#include "base/bind.h"
+#include "base/callback_helpers.h"
+#include "base/logging.h"
+#include "base/memory/aligned_memory.h"
+#include "base/strings/string_piece.h"
+#include "media/base/limits.h"
+#include "media/base/video_util.h"
+#include "third_party/skia/include/core/SkBitmap.h"
+
+namespace media {
+
+// static
+scoped_refptr<VideoFrame> VideoFrame::CreateFrame(
+ VideoFrame::Format format,
+ const gfx::Size& coded_size,
+ const gfx::Rect& visible_rect,
+ const gfx::Size& natural_size,
+ base::TimeDelta timestamp) {
+ DCHECK(IsValidConfig(format, coded_size, visible_rect, natural_size));
+ scoped_refptr<VideoFrame> frame(new VideoFrame(
+ format, coded_size, visible_rect, natural_size, timestamp));
+ switch (format) {
+ case VideoFrame::RGB32:
+ frame->AllocateRGB(4u);
+ break;
+ case VideoFrame::YV12:
+ case VideoFrame::YV12A:
+ case VideoFrame::YV16:
+ case VideoFrame::I420:
+ frame->AllocateYUV();
+ break;
+ default:
+ LOG(FATAL) << "Unsupported frame format: " << format;
+ }
+ return frame;
+}
+
+// static
+std::string VideoFrame::FormatToString(VideoFrame::Format format) {
+ switch (format) {
+ case VideoFrame::INVALID:
+ return "INVALID";
+ case VideoFrame::RGB32:
+ return "RGB32";
+ case VideoFrame::YV12:
+ return "YV12";
+ case VideoFrame::YV16:
+ return "YV16";
+ case VideoFrame::EMPTY:
+ return "EMPTY";
+ case VideoFrame::I420:
+ return "I420";
+ case VideoFrame::NATIVE_TEXTURE:
+ return "NATIVE_TEXTURE";
+#if defined(GOOGLE_TV)
+ case VideoFrame::HOLE:
+ return "HOLE";
+#endif
+ case VideoFrame::YV12A:
+ return "YV12A";
+ }
+ NOTREACHED() << "Invalid videoframe format provided: " << format;
+ return "";
+}
+
+// static
+bool VideoFrame::IsValidConfig(VideoFrame::Format format,
+ const gfx::Size& coded_size,
+ const gfx::Rect& visible_rect,
+ const gfx::Size& natural_size) {
+ return (format != VideoFrame::INVALID &&
+ !coded_size.IsEmpty() &&
+ coded_size.GetArea() <= limits::kMaxCanvas &&
+ coded_size.width() <= limits::kMaxDimension &&
+ coded_size.height() <= limits::kMaxDimension &&
+ !visible_rect.IsEmpty() &&
+ visible_rect.x() >= 0 && visible_rect.y() >= 0 &&
+ visible_rect.right() <= coded_size.width() &&
+ visible_rect.bottom() <= coded_size.height() &&
+ !natural_size.IsEmpty() &&
+ natural_size.GetArea() <= limits::kMaxCanvas &&
+ natural_size.width() <= limits::kMaxDimension &&
+ natural_size.height() <= limits::kMaxDimension);
+}
+
+// static
+scoped_refptr<VideoFrame> VideoFrame::WrapNativeTexture(
+ const scoped_refptr<MailboxHolder>& mailbox_holder,
+ uint32 texture_target,
+ const gfx::Size& coded_size,
+ const gfx::Rect& visible_rect,
+ const gfx::Size& natural_size,
+ base::TimeDelta timestamp,
+ const ReadPixelsCB& read_pixels_cb,
+ const base::Closure& no_longer_needed_cb) {
+ scoped_refptr<VideoFrame> frame(new VideoFrame(
+ NATIVE_TEXTURE, coded_size, visible_rect, natural_size, timestamp));
+ frame->texture_mailbox_holder_ = mailbox_holder;
+ frame->texture_target_ = texture_target;
+ frame->read_pixels_cb_ = read_pixels_cb;
+ frame->no_longer_needed_cb_ = no_longer_needed_cb;
+
+ return frame;
+}
+
+void VideoFrame::ReadPixelsFromNativeTexture(const SkBitmap& pixels) {
+ DCHECK_EQ(format_, NATIVE_TEXTURE);
+ if (!read_pixels_cb_.is_null())
+ read_pixels_cb_.Run(pixels);
+}
+
+// static
+scoped_refptr<VideoFrame> VideoFrame::WrapExternalSharedMemory(
+ Format format,
+ const gfx::Size& coded_size,
+ const gfx::Rect& visible_rect,
+ const gfx::Size& natural_size,
+ uint8* data,
+ base::SharedMemoryHandle handle,
+ base::TimeDelta timestamp,
+ const base::Closure& no_longer_needed_cb) {
+ switch (format) {
+ case I420: {
+ scoped_refptr<VideoFrame> frame(new VideoFrame(
+ format, coded_size, visible_rect, natural_size, timestamp));
+ frame->shared_memory_handle_ = handle;
+ frame->strides_[kYPlane] = coded_size.width();
+ frame->strides_[kUPlane] = coded_size.width() / 2;
+ frame->strides_[kVPlane] = coded_size.width() / 2;
+ frame->data_[kYPlane] = data;
+ frame->data_[kUPlane] = data + coded_size.GetArea();
+ frame->data_[kVPlane] = data + (coded_size.GetArea() * 5 / 4);
+ frame->no_longer_needed_cb_ = no_longer_needed_cb;
+ return frame;
+ }
+ default:
+ NOTIMPLEMENTED();
+ return NULL;
+ }
+}
+
+// static
+scoped_refptr<VideoFrame> VideoFrame::WrapExternalYuvData(
+ Format format,
+ const gfx::Size& coded_size,
+ const gfx::Rect& visible_rect,
+ const gfx::Size& natural_size,
+ int32 y_stride,
+ int32 u_stride,
+ int32 v_stride,
+ uint8* y_data,
+ uint8* u_data,
+ uint8* v_data,
+ base::TimeDelta timestamp,
+ const base::Closure& no_longer_needed_cb) {
+ DCHECK(format == YV12 || format == YV16 || format == I420) << format;
+ scoped_refptr<VideoFrame> frame(new VideoFrame(
+ format, coded_size, visible_rect, natural_size, timestamp));
+ frame->strides_[kYPlane] = y_stride;
+ frame->strides_[kUPlane] = u_stride;
+ frame->strides_[kVPlane] = v_stride;
+ frame->data_[kYPlane] = y_data;
+ frame->data_[kUPlane] = u_data;
+ frame->data_[kVPlane] = v_data;
+ frame->no_longer_needed_cb_ = no_longer_needed_cb;
+ return frame;
+}
+
+// static
+scoped_refptr<VideoFrame> VideoFrame::CreateEmptyFrame() {
+ return new VideoFrame(
+ VideoFrame::EMPTY, gfx::Size(), gfx::Rect(), gfx::Size(),
+ base::TimeDelta());
+}
+
+// static
+scoped_refptr<VideoFrame> VideoFrame::CreateColorFrame(
+ const gfx::Size& size,
+ uint8 y, uint8 u, uint8 v,
+ base::TimeDelta timestamp) {
+ DCHECK(IsValidConfig(VideoFrame::YV12, size, gfx::Rect(size), size));
+ scoped_refptr<VideoFrame> frame = VideoFrame::CreateFrame(
+ VideoFrame::YV12, size, gfx::Rect(size), size, timestamp);
+ FillYUV(frame.get(), y, u, v);
+ return frame;
+}
+
+// static
+scoped_refptr<VideoFrame> VideoFrame::CreateBlackFrame(const gfx::Size& size) {
+ const uint8 kBlackY = 0x00;
+ const uint8 kBlackUV = 0x80;
+ const base::TimeDelta kZero;
+ return CreateColorFrame(size, kBlackY, kBlackUV, kBlackUV, kZero);
+}
+
+#if defined(GOOGLE_TV)
+// This block and other blocks wrapped around #if defined(GOOGLE_TV) are not
+// maintained by the general compositor team. Please contact the following
+// people instead:
+//
+// wonsik@chromium.org
+// ycheo@chromium.org
+
+// static
+scoped_refptr<VideoFrame> VideoFrame::CreateHoleFrame(
+ const gfx::Size& size) {
+ DCHECK(IsValidConfig(VideoFrame::HOLE, size, gfx::Rect(size), size));
+ scoped_refptr<VideoFrame> frame(new VideoFrame(
+ VideoFrame::HOLE, size, gfx::Rect(size), size, base::TimeDelta()));
+ return frame;
+}
+#endif
+
+// static
+size_t VideoFrame::NumPlanes(Format format) {
+ switch (format) {
+ case VideoFrame::NATIVE_TEXTURE:
+#if defined(GOOGLE_TV)
+ case VideoFrame::HOLE:
+#endif
+ return 0;
+ case VideoFrame::RGB32:
+ return 1;
+ case VideoFrame::YV12:
+ case VideoFrame::YV16:
+ case VideoFrame::I420:
+ return 3;
+ case VideoFrame::YV12A:
+ return 4;
+ case VideoFrame::EMPTY:
+ case VideoFrame::INVALID:
+ break;
+ }
+ NOTREACHED() << "Unsupported video frame format: " << format;
+ return 0;
+}
+
+static inline size_t RoundUp(size_t value, size_t alignment) {
+ // Check that |alignment| is a power of 2.
+ DCHECK((alignment + (alignment - 1)) == (alignment | (alignment - 1)));
+ return ((value + (alignment - 1)) & ~(alignment-1));
+}
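+// For example, RoundUp(61, 16) == 64 and RoundUp(64, 16) == 64. The DCHECK
+// rejects a non-power-of-two |alignment| such as 12: only when |alignment|
+// has a single bit set does alignment + (alignment - 1) equal
+// alignment | (alignment - 1).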
+
+// Release data allocated by AllocateRGB() or AllocateYUV().
+static void ReleaseData(uint8* data) {
+ DCHECK(data);
+ base::AlignedFree(data);
+}
+
+void VideoFrame::AllocateRGB(size_t bytes_per_pixel) {
+ // Round up to align at least at a 16-byte boundary for each row.
+ // This is sufficient for MMX and SSE2 reads (movq/movdqa).
+ size_t bytes_per_row = RoundUp(coded_size_.width(),
+ kFrameSizeAlignment) * bytes_per_pixel;
+ size_t aligned_height = RoundUp(coded_size_.height(), kFrameSizeAlignment);
+ strides_[VideoFrame::kRGBPlane] = bytes_per_row;
+ data_[VideoFrame::kRGBPlane] = reinterpret_cast<uint8*>(
+ base::AlignedAlloc(bytes_per_row * aligned_height + kFrameSizePadding,
+ kFrameAddressAlignment));
+ no_longer_needed_cb_ = base::Bind(&ReleaseData, data_[VideoFrame::kRGBPlane]);
+ DCHECK(!(reinterpret_cast<intptr_t>(data_[VideoFrame::kRGBPlane]) & 7));
+ COMPILE_ASSERT(0 == VideoFrame::kRGBPlane, RGB_data_must_be_index_0);
+}
+
+void VideoFrame::AllocateYUV() {
+ DCHECK(format_ == VideoFrame::YV12 || format_ == VideoFrame::YV16 ||
+ format_ == VideoFrame::YV12A || format_ == VideoFrame::I420);
+ // Align Y rows at least at 16 byte boundaries. The stride for both
+ // YV12 and YV16 is 1/2 of the stride of Y. For YV12, every row of bytes for
+ // U and V applies to two rows of Y (one byte of UV for 4 bytes of Y), so in
+  // the case of YV12 the strides are identical for the same width surface, but
+  // YV12 allocates half as many bytes for U and V as YV16 does. We also round
+  // the height of the surface allocated to be an even
+ // number to avoid any potential of faulting by code that attempts to access
+ // the Y values of the final row, but assumes that the last row of U & V
+ // applies to a full two rows of Y. YV12A is the same as YV12, but with an
+ // additional alpha plane that has the same size and alignment as the Y plane.
+
+ size_t y_stride = RoundUp(row_bytes(VideoFrame::kYPlane),
+ kFrameSizeAlignment);
+ size_t uv_stride = RoundUp(row_bytes(VideoFrame::kUPlane),
+ kFrameSizeAlignment);
+ // The *2 here is because some formats (e.g. h264) allow interlaced coding,
+ // and then the size needs to be a multiple of two macroblocks (vertically).
+ // See libavcodec/utils.c:avcodec_align_dimensions2().
+ size_t y_height = RoundUp(coded_size_.height(), kFrameSizeAlignment * 2);
+ size_t uv_height =
+ (format_ == VideoFrame::YV12 || format_ == VideoFrame::YV12A ||
+ format_ == VideoFrame::I420)
+ ? y_height / 2
+ : y_height;
+ size_t y_bytes = y_height * y_stride;
+ size_t uv_bytes = uv_height * uv_stride;
+ size_t a_bytes = format_ == VideoFrame::YV12A ? y_bytes : 0;
+
+ // The extra line of UV being allocated is because h264 chroma MC
+ // overreads by one line in some cases, see libavcodec/utils.c:
+ // avcodec_align_dimensions2() and libavcodec/x86/h264_chromamc.asm:
+ // put_h264_chroma_mc4_ssse3().
+ uint8* data = reinterpret_cast<uint8*>(
+ base::AlignedAlloc(
+ y_bytes + (uv_bytes * 2 + uv_stride) + a_bytes + kFrameSizePadding,
+ kFrameAddressAlignment));
+ no_longer_needed_cb_ = base::Bind(&ReleaseData, data);
+ COMPILE_ASSERT(0 == VideoFrame::kYPlane, y_plane_data_must_be_index_0);
+ data_[VideoFrame::kYPlane] = data;
+ data_[VideoFrame::kUPlane] = data + y_bytes;
+ data_[VideoFrame::kVPlane] = data + y_bytes + uv_bytes;
+ strides_[VideoFrame::kYPlane] = y_stride;
+ strides_[VideoFrame::kUPlane] = uv_stride;
+ strides_[VideoFrame::kVPlane] = uv_stride;
+ if (format_ == YV12A) {
+ data_[VideoFrame::kAPlane] = data + y_bytes + (2 * uv_bytes);
+ strides_[VideoFrame::kAPlane] = y_stride;
+ }
+}
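+// Illustrative numbers for the above: a 61x31 YV12 frame gets y_stride =
+// RoundUp(61, 16) = 64 and uv_stride = RoundUp(31, 16) = 32 (U/V row_bytes is
+// RoundUp(61, 2) / 2 = 31), with y_height = RoundUp(31, 32) = 32 and
+// uv_height = 16. That gives y_bytes = 2048 and uv_bytes = 512, for a single
+// allocation of 2048 + (512 * 2 + 32) + 16 = 3120 bytes.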
+
+VideoFrame::VideoFrame(VideoFrame::Format format,
+ const gfx::Size& coded_size,
+ const gfx::Rect& visible_rect,
+ const gfx::Size& natural_size,
+ base::TimeDelta timestamp)
+ : format_(format),
+ coded_size_(coded_size),
+ visible_rect_(visible_rect),
+ natural_size_(natural_size),
+ texture_target_(0),
+ shared_memory_handle_(base::SharedMemory::NULLHandle()),
+ timestamp_(timestamp) {
+ memset(&strides_, 0, sizeof(strides_));
+ memset(&data_, 0, sizeof(data_));
+}
+
+VideoFrame::~VideoFrame() {
+ if (!no_longer_needed_cb_.is_null())
+ base::ResetAndReturn(&no_longer_needed_cb_).Run();
+}
+
+bool VideoFrame::IsValidPlane(size_t plane) const {
+ return (plane < NumPlanes(format_));
+}
+
+int VideoFrame::stride(size_t plane) const {
+ DCHECK(IsValidPlane(plane));
+ return strides_[plane];
+}
+
+int VideoFrame::row_bytes(size_t plane) const {
+ DCHECK(IsValidPlane(plane));
+ int width = coded_size_.width();
+ switch (format_) {
+ // 32bpp.
+ case RGB32:
+ return width * 4;
+
+ // Planar, 8bpp.
+ case YV12A:
+ if (plane == kAPlane)
+ return width;
+ // Fallthrough.
+ case YV12:
+ case YV16:
+ case I420:
+ if (plane == kYPlane)
+ return width;
+ return RoundUp(width, 2) / 2;
+
+ default:
+ break;
+ }
+
+ // Intentionally leave out non-production formats.
+ NOTREACHED() << "Unsupported video frame format: " << format_;
+ return 0;
+}
+
+int VideoFrame::rows(size_t plane) const {
+ DCHECK(IsValidPlane(plane));
+ int height = coded_size_.height();
+ switch (format_) {
+ case RGB32:
+ case YV16:
+ return height;
+
+ case YV12A:
+ if (plane == kAPlane)
+ return height;
+ // Fallthrough.
+ case YV12:
+ case I420:
+ if (plane == kYPlane)
+ return height;
+ return RoundUp(height, 2) / 2;
+
+ default:
+ break;
+ }
+
+ // Intentionally leave out non-production formats.
+ NOTREACHED() << "Unsupported video frame format: " << format_;
+ return 0;
+}
+
+uint8* VideoFrame::data(size_t plane) const {
+ DCHECK(IsValidPlane(plane));
+ return data_[plane];
+}
+
+const scoped_refptr<VideoFrame::MailboxHolder>& VideoFrame::texture_mailbox()
+ const {
+ DCHECK_EQ(format_, NATIVE_TEXTURE);
+ return texture_mailbox_holder_;
+}
+
+uint32 VideoFrame::texture_target() const {
+ DCHECK_EQ(format_, NATIVE_TEXTURE);
+ return texture_target_;
+}
+
+base::SharedMemoryHandle VideoFrame::shared_memory_handle() const {
+ return shared_memory_handle_;
+}
+
+bool VideoFrame::IsEndOfStream() const {
+ return format_ == VideoFrame::EMPTY;
+}
+
+void VideoFrame::HashFrameForTesting(base::MD5Context* context) {
+ for (int plane = 0; plane < kMaxPlanes; ++plane) {
+ if (!IsValidPlane(plane))
+ break;
+ for (int row = 0; row < rows(plane); ++row) {
+ base::MD5Update(context, base::StringPiece(
+ reinterpret_cast<char*>(data(plane) + stride(plane) * row),
+ row_bytes(plane)));
+ }
+ }
+}
+
+VideoFrame::MailboxHolder::MailboxHolder(
+ const gpu::Mailbox& mailbox,
+ unsigned sync_point,
+ const TextureNoLongerNeededCallback& release_callback)
+ : mailbox_(mailbox),
+ sync_point_(sync_point),
+ release_callback_(release_callback) {}
+
+VideoFrame::MailboxHolder::~MailboxHolder() {
+ if (!release_callback_.is_null())
+ release_callback_.Run(sync_point_);
+}
+
+} // namespace media
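
A short usage sketch for the zero-copy wrapping path; the WrapI420 helper
below is hypothetical. The caller keeps ownership of |data| and frees it from
|release_cb| once the frame reports it is no longer needed:

#include "media/base/video_frame.h"

// Hypothetical: wrap a caller-owned, tightly packed I420 buffer.
scoped_refptr<media::VideoFrame> WrapI420(uint8* data,
                                          const gfx::Size& size,
                                          base::TimeDelta timestamp,
                                          const base::Closure& release_cb) {
  int y_stride = size.width();
  int uv_stride = size.width() / 2;
  uint8* y = data;
  uint8* u = y + size.GetArea();
  uint8* v = u + size.GetArea() / 4;
  return media::VideoFrame::WrapExternalYuvData(
      media::VideoFrame::I420, size, gfx::Rect(size), size,
      y_stride, uv_stride, uv_stride, y, u, v, timestamp, release_cb);
}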
diff --git a/chromium/media/base/video_frame.h b/chromium/media/base/video_frame.h
new file mode 100644
index 00000000000..82a08a986de
--- /dev/null
+++ b/chromium/media/base/video_frame.h
@@ -0,0 +1,294 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_VIDEO_FRAME_H_
+#define MEDIA_BASE_VIDEO_FRAME_H_
+
+#include "base/callback.h"
+#include "base/md5.h"
+#include "base/memory/shared_memory.h"
+#include "gpu/command_buffer/common/mailbox.h"
+#include "media/base/buffers.h"
+#include "ui/gfx/rect.h"
+#include "ui/gfx/size.h"
+
+class SkBitmap;
+
+namespace media {
+
+class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
+ public:
+ enum {
+ kFrameSizeAlignment = 16,
+ kFrameSizePadding = 16,
+ kFrameAddressAlignment = 32
+ };
+
+ enum {
+ kMaxPlanes = 4,
+
+ kRGBPlane = 0,
+
+ kYPlane = 0,
+ kUPlane = 1,
+ kVPlane = 2,
+ kAPlane = 3,
+ };
+
+ // Surface formats roughly based on FOURCC labels, see:
+ // http://www.fourcc.org/rgb.php
+ // http://www.fourcc.org/yuv.php
+ enum Format {
+ INVALID = 0, // Invalid format value. Used for error reporting.
+ RGB32 = 4, // 32bpp RGB packed with extra byte 8:8:8
+ YV12 = 6, // 12bpp YVU planar 1x1 Y, 2x2 VU samples
+ YV16 = 7, // 16bpp YVU planar 1x1 Y, 2x1 VU samples
+ EMPTY = 9, // An empty frame.
+    I420 = 11,  // 12bpp YUV planar 1x1 Y, 2x2 UV samples.
+ NATIVE_TEXTURE = 12, // Native texture. Pixel-format agnostic.
+#if defined(GOOGLE_TV)
+ HOLE = 13, // Hole frame.
+#endif
+ YV12A = 14, // 20bpp YUVA planar 1x1 Y, 2x2 VU, 1x1 A samples.
+ };
+
+ // Returns the name of a Format as a string.
+ static std::string FormatToString(Format format);
+
+ // This class calls the TextureNoLongerNeededCallback when the last reference
+ // on the class is destroyed. The VideoFrame holds a reference to the mailbox
+ // but anyone else who queries the mailbox should also hold a reference while
+  // it uses the mailbox, to ensure it remains valid. When finished with the
+ // mailbox, call Return() with a new sync point, to ensure the mailbox remains
+ // valid for the issued commands.
+ class MEDIA_EXPORT MailboxHolder
+ : public base::RefCountedThreadSafe<MailboxHolder> {
+ public:
+ typedef base::Callback<void(uint32 sync_point)>
+ TextureNoLongerNeededCallback;
+
+ MailboxHolder(const gpu::Mailbox& mailbox,
+ unsigned sync_point,
+ const TextureNoLongerNeededCallback& release_callback);
+
+ const gpu::Mailbox& mailbox() const { return mailbox_; }
+ unsigned sync_point() const { return sync_point_; }
+
+ void Return(unsigned sync_point) { sync_point_ = sync_point; }
+
+ private:
+ friend class base::RefCountedThreadSafe<MailboxHolder>;
+ ~MailboxHolder();
+
+ gpu::Mailbox mailbox_;
+ unsigned sync_point_;
+ TextureNoLongerNeededCallback release_callback_;
+ };
+
+
+ // Creates a new frame in system memory with given parameters. Buffers for
+ // the frame are allocated but not initialized.
+ // |coded_size| is the width and height of the frame data in pixels.
+ // |visible_rect| is the visible portion of |coded_size|, after cropping (if
+ // any) is applied.
+ // |natural_size| is the width and height of the frame when the frame's aspect
+ // ratio is applied to |visible_rect|.
+ static scoped_refptr<VideoFrame> CreateFrame(
+ Format format,
+ const gfx::Size& coded_size,
+ const gfx::Rect& visible_rect,
+ const gfx::Size& natural_size,
+ base::TimeDelta timestamp);
+
+ // Call prior to CreateFrame to ensure validity of frame configuration. Called
+ // automatically by VideoDecoderConfig::IsValidConfig().
+ // TODO(scherkus): VideoDecoderConfig shouldn't call this method
+ static bool IsValidConfig(Format format, const gfx::Size& coded_size,
+ const gfx::Rect& visible_rect,
+ const gfx::Size& natural_size);
+
+ // CB to write pixels from the texture backing this frame into the
+ // |const SkBitmap&| parameter.
+ typedef base::Callback<void(const SkBitmap&)> ReadPixelsCB;
+
+ // Wraps a native texture of the given parameters with a VideoFrame. When the
+ // frame is destroyed |no_longer_needed_cb.Run()| will be called.
+ // |coded_size| is the width and height of the frame data in pixels.
+ // |visible_rect| is the visible portion of |coded_size|, after cropping (if
+ // any) is applied.
+ // |natural_size| is the width and height of the frame when the frame's aspect
+ // ratio is applied to |visible_rect|.
+  //
+ // |read_pixels_cb| may be used to do (slow!) readbacks from the
+ // texture to main memory.
+ static scoped_refptr<VideoFrame> WrapNativeTexture(
+ const scoped_refptr<MailboxHolder>& mailbox_holder,
+ uint32 texture_target,
+ const gfx::Size& coded_size,
+ const gfx::Rect& visible_rect,
+ const gfx::Size& natural_size,
+ base::TimeDelta timestamp,
+ const ReadPixelsCB& read_pixels_cb,
+ const base::Closure& no_longer_needed_cb);
+
+ // Read pixels from the native texture backing |*this| and write
+ // them to |pixels| as BGRA. |pixels| must point to a buffer at
+ // least as large as 4*visible_rect().width()*visible_rect().height().
+ void ReadPixelsFromNativeTexture(const SkBitmap& pixels);
+
+ // Wraps image data in a buffer backed by a base::SharedMemoryHandle with a
+ // VideoFrame. The image data resides in |data| and is assumed to be packed
+ // tightly in a buffer of logical dimensions |coded_size| with the appropriate
+ // bit depth and plane count as given by |format|. When the frame is
+ // destroyed |no_longer_needed_cb.Run()| will be called.
+ static scoped_refptr<VideoFrame> WrapExternalSharedMemory(
+ Format format,
+ const gfx::Size& coded_size,
+ const gfx::Rect& visible_rect,
+ const gfx::Size& natural_size,
+ uint8* data,
+ base::SharedMemoryHandle handle,
+ base::TimeDelta timestamp,
+ const base::Closure& no_longer_needed_cb);
+
+ // Wraps external YUV data of the given parameters with a VideoFrame.
+ // The returned VideoFrame does not own the data passed in. When the frame
+ // is destroyed |no_longer_needed_cb.Run()| will be called.
+ // TODO(sheu): merge this into WrapExternalSharedMemory().
+ // http://crbug.com/270217
+ static scoped_refptr<VideoFrame> WrapExternalYuvData(
+ Format format,
+ const gfx::Size& coded_size,
+ const gfx::Rect& visible_rect,
+ const gfx::Size& natural_size,
+ int32 y_stride,
+ int32 u_stride,
+ int32 v_stride,
+ uint8* y_data,
+ uint8* u_data,
+ uint8* v_data,
+ base::TimeDelta timestamp,
+ const base::Closure& no_longer_needed_cb);
+
+  // Creates a frame whose format is VideoFrame::EMPTY and whose width,
+  // height, and timestamp are all 0.
+ static scoped_refptr<VideoFrame> CreateEmptyFrame();
+
+ // Allocates YV12 frame based on |size|, and sets its data to the YUV(y,u,v).
+ static scoped_refptr<VideoFrame> CreateColorFrame(
+ const gfx::Size& size,
+ uint8 y, uint8 u, uint8 v,
+ base::TimeDelta timestamp);
+
+ // Allocates YV12 frame based on |size|, and sets its data to the YUV
+ // equivalent of RGB(0,0,0).
+ static scoped_refptr<VideoFrame> CreateBlackFrame(const gfx::Size& size);
+
+#if defined(GOOGLE_TV)
+ // Allocates a hole frame.
+ static scoped_refptr<VideoFrame> CreateHoleFrame(const gfx::Size& size);
+#endif
+
+ static size_t NumPlanes(Format format);
+
+ Format format() const { return format_; }
+
+ const gfx::Size& coded_size() const { return coded_size_; }
+ const gfx::Rect& visible_rect() const { return visible_rect_; }
+ const gfx::Size& natural_size() const { return natural_size_; }
+
+ int stride(size_t plane) const;
+
+ // Returns the number of bytes per row and number of rows for a given plane.
+ //
+ // As opposed to stride(), row_bytes() refers to the bytes representing
+ // frame data scanlines (coded_size.width() pixels, without stride padding).
+ int row_bytes(size_t plane) const;
+ int rows(size_t plane) const;
+
+  // Returns a pointer to the buffer for a given plane. The memory is owned by
+  // the VideoFrame object and must not be freed by the caller.
+ uint8* data(size_t plane) const;
+
+ // Returns the mailbox of the native texture wrapped by this frame. Only
+ // valid to call if this is a NATIVE_TEXTURE frame. Before using the
+ // mailbox, the caller must wait for the included sync point.
+ const scoped_refptr<MailboxHolder>& texture_mailbox() const;
+
+ // Returns the texture target. Only valid for NATIVE_TEXTURE frames.
+ uint32 texture_target() const;
+
+  // Returns the shared-memory handle, if present.
+ base::SharedMemoryHandle shared_memory_handle() const;
+
+ // Returns true if this VideoFrame represents the end of the stream.
+ bool IsEndOfStream() const;
+
+ base::TimeDelta GetTimestamp() const {
+ return timestamp_;
+ }
+ void SetTimestamp(const base::TimeDelta& timestamp) {
+ timestamp_ = timestamp;
+ }
+
+ // Used to keep a running hash of seen frames. Expects an initialized MD5
+ // context. Calls MD5Update with the context and the contents of the frame.
+ void HashFrameForTesting(base::MD5Context* context);
+
+ private:
+ friend class base::RefCountedThreadSafe<VideoFrame>;
+ // Clients must use the static CreateFrame() method to create a new frame.
+ VideoFrame(Format format,
+ const gfx::Size& coded_size,
+ const gfx::Rect& visible_rect,
+ const gfx::Size& natural_size,
+ base::TimeDelta timestamp);
+ virtual ~VideoFrame();
+
+ // Used internally by CreateFrame().
+ void AllocateRGB(size_t bytes_per_pixel);
+ void AllocateYUV();
+
+ // Used to DCHECK() plane parameters.
+ bool IsValidPlane(size_t plane) const;
+
+ // Frame format.
+ Format format_;
+
+ // Width and height of the video frame.
+ gfx::Size coded_size_;
+
+ // Width, height, and offsets of the visible portion of the video frame.
+ gfx::Rect visible_rect_;
+
+ // Width and height of the visible portion of the video frame with aspect
+ // ratio taken into account.
+ gfx::Size natural_size_;
+
+  // Array of strides for each plane, typically greater than or equal to the
+  // width of the surface divided by the horizontal sampling period. Note that
+ // strides can be negative.
+ int32 strides_[kMaxPlanes];
+
+ // Array of data pointers to each plane.
+ uint8* data_[kMaxPlanes];
+
+ // Native texture mailbox, if this is a NATIVE_TEXTURE frame.
+ scoped_refptr<MailboxHolder> texture_mailbox_holder_;
+ uint32 texture_target_;
+ ReadPixelsCB read_pixels_cb_;
+
+ // Shared memory handle, if this frame was allocated from shared memory.
+ base::SharedMemoryHandle shared_memory_handle_;
+
+ base::Closure no_longer_needed_cb_;
+
+ base::TimeDelta timestamp_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(VideoFrame);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_VIDEO_FRAME_H_
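
Tying the allocation API together, a minimal sketch (sizes and timestamps are
illustrative). Note the stride()/row_bytes() distinction documented above:

#include <cstring>

#include "media/base/video_frame.h"

scoped_refptr<media::VideoFrame> MakeFrame() {
  gfx::Size coded(320, 240);
  scoped_refptr<media::VideoFrame> frame = media::VideoFrame::CreateFrame(
      media::VideoFrame::YV12, coded, gfx::Rect(coded), coded,
      base::TimeDelta::FromMilliseconds(40));
  // Planes are allocated but uninitialized; zero the Y plane as an example.
  // stride() can exceed row_bytes() because rows are 16-byte aligned.
  memset(frame->data(media::VideoFrame::kYPlane), 0,
         frame->stride(media::VideoFrame::kYPlane) *
             frame->rows(media::VideoFrame::kYPlane));
  return frame;
}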
diff --git a/chromium/media/base/video_frame_unittest.cc b/chromium/media/base/video_frame_unittest.cc
new file mode 100644
index 00000000000..b88d20c3639
--- /dev/null
+++ b/chromium/media/base/video_frame_unittest.cc
@@ -0,0 +1,425 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/video_frame.h"
+
+#include "base/bind.h"
+#include "base/callback_helpers.h"
+#include "base/format_macros.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/strings/stringprintf.h"
+#include "media/base/buffers.h"
+#include "media/base/yuv_convert.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+using base::MD5DigestToBase16;
+
+// Helper function that initializes a YV12 frame with white and black scan
+// lines based on the |white_to_black| parameter. If 0, then the entire
+// frame will be black; if 1, then the entire frame will be white.
+void InitializeYV12Frame(VideoFrame* frame, double white_to_black) {
+ EXPECT_EQ(VideoFrame::YV12, frame->format());
+ int first_black_row = static_cast<int>(frame->coded_size().height() *
+ white_to_black);
+ uint8* y_plane = frame->data(VideoFrame::kYPlane);
+ for (int row = 0; row < frame->coded_size().height(); ++row) {
+ int color = (row < first_black_row) ? 0xFF : 0x00;
+ memset(y_plane, color, frame->stride(VideoFrame::kYPlane));
+ y_plane += frame->stride(VideoFrame::kYPlane);
+ }
+ uint8* u_plane = frame->data(VideoFrame::kUPlane);
+ uint8* v_plane = frame->data(VideoFrame::kVPlane);
+ for (int row = 0; row < frame->coded_size().height(); row += 2) {
+ memset(u_plane, 0x80, frame->stride(VideoFrame::kUPlane));
+ memset(v_plane, 0x80, frame->stride(VideoFrame::kVPlane));
+ u_plane += frame->stride(VideoFrame::kUPlane);
+ v_plane += frame->stride(VideoFrame::kVPlane);
+ }
+}
+
+// Given a |yv12_frame| this method converts the YV12 frame to RGBA and
+// makes sure that all the pixels of the RGB frame equal |expect_rgb_color|.
+void ExpectFrameColor(media::VideoFrame* yv12_frame, uint32 expect_rgb_color) {
+ ASSERT_EQ(VideoFrame::YV12, yv12_frame->format());
+ ASSERT_EQ(yv12_frame->stride(VideoFrame::kUPlane),
+ yv12_frame->stride(VideoFrame::kVPlane));
+
+ scoped_refptr<media::VideoFrame> rgb_frame;
+ rgb_frame = media::VideoFrame::CreateFrame(VideoFrame::RGB32,
+ yv12_frame->coded_size(),
+ yv12_frame->visible_rect(),
+ yv12_frame->natural_size(),
+ yv12_frame->GetTimestamp());
+
+ ASSERT_EQ(yv12_frame->coded_size().width(),
+ rgb_frame->coded_size().width());
+ ASSERT_EQ(yv12_frame->coded_size().height(),
+ rgb_frame->coded_size().height());
+
+ media::ConvertYUVToRGB32(yv12_frame->data(VideoFrame::kYPlane),
+ yv12_frame->data(VideoFrame::kUPlane),
+ yv12_frame->data(VideoFrame::kVPlane),
+ rgb_frame->data(VideoFrame::kRGBPlane),
+ rgb_frame->coded_size().width(),
+ rgb_frame->coded_size().height(),
+ yv12_frame->stride(VideoFrame::kYPlane),
+ yv12_frame->stride(VideoFrame::kUPlane),
+ rgb_frame->stride(VideoFrame::kRGBPlane),
+ media::YV12);
+
+ for (int row = 0; row < rgb_frame->coded_size().height(); ++row) {
+ uint32* rgb_row_data = reinterpret_cast<uint32*>(
+ rgb_frame->data(VideoFrame::kRGBPlane) +
+ (rgb_frame->stride(VideoFrame::kRGBPlane) * row));
+ for (int col = 0; col < rgb_frame->coded_size().width(); ++col) {
+ SCOPED_TRACE(
+ base::StringPrintf("Checking (%d, %d)", row, col));
+ EXPECT_EQ(expect_rgb_color, rgb_row_data[col]);
+ }
+ }
+}
+
+// Fill each plane to its reported extents and verify accessors report
+// non-zero values. Additionally, for the first plane verify the rows and
+// row_bytes values are correct.
+void ExpectFrameExtents(VideoFrame::Format format, int planes,
+ int bytes_per_pixel, const char* expected_hash) {
+ const unsigned char kFillByte = 0x80;
+ const int kWidth = 61;
+ const int kHeight = 31;
+ const base::TimeDelta kTimestamp = base::TimeDelta::FromMicroseconds(1337);
+
+ gfx::Size size(kWidth, kHeight);
+ scoped_refptr<VideoFrame> frame = VideoFrame::CreateFrame(
+ format, size, gfx::Rect(size), size, kTimestamp);
+ ASSERT_TRUE(frame.get());
+
+  for (int plane = 0; plane < planes; plane++) {
+ SCOPED_TRACE(base::StringPrintf("Checking plane %d", plane));
+ EXPECT_TRUE(frame->data(plane));
+ EXPECT_TRUE(frame->stride(plane));
+ EXPECT_TRUE(frame->rows(plane));
+ EXPECT_TRUE(frame->row_bytes(plane));
+
+ if (plane == 0) {
+ EXPECT_EQ(frame->rows(plane), kHeight);
+ EXPECT_EQ(frame->row_bytes(plane), kWidth * bytes_per_pixel);
+ }
+
+ memset(frame->data(plane), kFillByte,
+ frame->stride(plane) * frame->rows(plane));
+ }
+
+ base::MD5Context context;
+ base::MD5Init(&context);
+ frame->HashFrameForTesting(&context);
+ base::MD5Digest digest;
+ base::MD5Final(&digest, &context);
+ EXPECT_EQ(MD5DigestToBase16(digest), expected_hash);
+}
+
+TEST(VideoFrame, CreateFrame) {
+ const int kWidth = 64;
+ const int kHeight = 48;
+ const base::TimeDelta kTimestamp = base::TimeDelta::FromMicroseconds(1337);
+
+ // Create a YV12 Video Frame.
+ gfx::Size size(kWidth, kHeight);
+ scoped_refptr<media::VideoFrame> frame =
+ VideoFrame::CreateFrame(media::VideoFrame::YV12, size, gfx::Rect(size),
+ size, kTimestamp);
+ ASSERT_TRUE(frame.get());
+
+ // Test VideoFrame implementation.
+ EXPECT_EQ(media::VideoFrame::YV12, frame->format());
+ {
+ SCOPED_TRACE("");
+ InitializeYV12Frame(frame.get(), 0.0f);
+ ExpectFrameColor(frame.get(), 0xFF000000);
+ }
+ base::MD5Digest digest;
+ base::MD5Context context;
+ base::MD5Init(&context);
+ frame->HashFrameForTesting(&context);
+ base::MD5Final(&digest, &context);
+ EXPECT_EQ(MD5DigestToBase16(digest), "9065c841d9fca49186ef8b4ef547e79b");
+ {
+ SCOPED_TRACE("");
+ InitializeYV12Frame(frame.get(), 1.0f);
+ ExpectFrameColor(frame.get(), 0xFFFFFFFF);
+ }
+ base::MD5Init(&context);
+ frame->HashFrameForTesting(&context);
+ base::MD5Final(&digest, &context);
+ EXPECT_EQ(MD5DigestToBase16(digest), "911991d51438ad2e1a40ed5f6fc7c796");
+
+ // Test an empty frame.
+ frame = VideoFrame::CreateEmptyFrame();
+ EXPECT_TRUE(frame->IsEndOfStream());
+}
+
+TEST(VideoFrame, CreateBlackFrame) {
+ const int kWidth = 2;
+ const int kHeight = 2;
+ const uint8 kExpectedYRow[] = { 0, 0 };
+ const uint8 kExpectedUVRow[] = { 128 };
+
+ scoped_refptr<media::VideoFrame> frame =
+ VideoFrame::CreateBlackFrame(gfx::Size(kWidth, kHeight));
+ ASSERT_TRUE(frame.get());
+
+ // Test basic properties.
+ EXPECT_EQ(0, frame->GetTimestamp().InMicroseconds());
+ EXPECT_FALSE(frame->IsEndOfStream());
+
+ // Test |frame| properties.
+ EXPECT_EQ(VideoFrame::YV12, frame->format());
+ EXPECT_EQ(kWidth, frame->coded_size().width());
+ EXPECT_EQ(kHeight, frame->coded_size().height());
+
+ // Test frames themselves.
+ uint8* y_plane = frame->data(VideoFrame::kYPlane);
+ for (int y = 0; y < frame->coded_size().height(); ++y) {
+ EXPECT_EQ(0, memcmp(kExpectedYRow, y_plane, arraysize(kExpectedYRow)));
+ y_plane += frame->stride(VideoFrame::kYPlane);
+ }
+
+ uint8* u_plane = frame->data(VideoFrame::kUPlane);
+ uint8* v_plane = frame->data(VideoFrame::kVPlane);
+ for (int y = 0; y < frame->coded_size().height() / 2; ++y) {
+ EXPECT_EQ(0, memcmp(kExpectedUVRow, u_plane, arraysize(kExpectedUVRow)));
+ EXPECT_EQ(0, memcmp(kExpectedUVRow, v_plane, arraysize(kExpectedUVRow)));
+ u_plane += frame->stride(VideoFrame::kUPlane);
+ v_plane += frame->stride(VideoFrame::kVPlane);
+ }
+}
+
+// Ensure each frame is properly sized and allocated. Will trigger OOB reads
+// and writes as well as incorrect frame hashes otherwise.
+TEST(VideoFrame, CheckFrameExtents) {
+ // Each call consists of a VideoFrame::Format, # of planes, bytes per pixel,
+ // and the expected hash of all planes if filled with kFillByte (defined in
+ // ExpectFrameExtents).
+ ExpectFrameExtents(
+ VideoFrame::RGB32, 1, 4, "de6d3d567e282f6a38d478f04fc81fb0");
+ ExpectFrameExtents(
+ VideoFrame::YV12, 3, 1, "71113bdfd4c0de6cf62f48fb74f7a0b1");
+ ExpectFrameExtents(
+ VideoFrame::YV16, 3, 1, "9bb99ac3ff350644ebff4d28dc01b461");
+}
+
+static void TextureCallback(uint32* called_sync_point, uint32 sync_point) {
+ *called_sync_point = sync_point;
+}
+
+// Verify the TextureNoLongerNeededCallback is called when VideoFrame is
+// destroyed with the original sync point.
+TEST(VideoFrame, TextureNoLongerNeededCallbackIsCalled) {
+ uint32 sync_point = 7;
+ uint32 called_sync_point = 0;
+
+ {
+ scoped_refptr<VideoFrame> frame = VideoFrame::WrapNativeTexture(
+ new VideoFrame::MailboxHolder(
+ gpu::Mailbox(),
+ sync_point,
+ base::Bind(&TextureCallback, &called_sync_point)),
+ 5, // texture_target
+ gfx::Size(10, 10), // coded_size
+ gfx::Rect(10, 10), // visible_rect
+ gfx::Size(10, 10), // natural_size
+ base::TimeDelta(), // timestamp
+ base::Callback<void(const SkBitmap&)>(), // read_pixels_cb
+ base::Closure()); // no_longer_needed_cb
+
+ EXPECT_EQ(0u, called_sync_point);
+ }
+ EXPECT_EQ(sync_point, called_sync_point);
+}
+
+// Verify the TextureNoLongerNeededCallback is called when VideoFrame is
+// destroyed with the new sync point, when the mailbox is taken by a caller.
+TEST(VideoFrame, TextureNoLongerNeededCallbackAfterTakingAndReleasingMailbox) {
+ uint32 called_sync_point = 0;
+
+ gpu::Mailbox mailbox;
+ mailbox.name[0] = 50;
+ uint32 sync_point = 7;
+ uint32 target = 9;
+
+ {
+ scoped_refptr<VideoFrame> frame = VideoFrame::WrapNativeTexture(
+ new VideoFrame::MailboxHolder(
+ mailbox,
+ sync_point,
+ base::Bind(&TextureCallback, &called_sync_point)),
+ target,
+ gfx::Size(10, 10), // coded_size
+ gfx::Rect(10, 10), // visible_rect
+ gfx::Size(10, 10), // natural_size
+ base::TimeDelta(), // timestamp
+ base::Callback<void(const SkBitmap&)>(), // read_pixels_cb
+ base::Closure()); // no_longer_needed_cb
+
+ {
+ scoped_refptr<VideoFrame::MailboxHolder> mailbox_holder =
+ frame->texture_mailbox();
+
+ EXPECT_EQ(mailbox.name[0], mailbox_holder->mailbox().name[0]);
+ EXPECT_EQ(sync_point, mailbox_holder->sync_point());
+ EXPECT_EQ(target, frame->texture_target());
+
+ // Misuse the callback.
+ sync_point = 12;
+ mailbox_holder->Return(sync_point);
+ EXPECT_EQ(0u, called_sync_point);
+
+ // Finish using the mailbox_holder and drop our reference.
+ sync_point = 10;
+ mailbox_holder->Return(sync_point);
+ }
+ EXPECT_EQ(0u, called_sync_point);
+ }
+ EXPECT_EQ(sync_point, called_sync_point);
+}
+
+// If a caller has taken ownership of the texture mailbox, it should
+// not be released when the VideoFrame is destroyed, but should when
+// the TextureNoLongerNeededCallback is called.
+TEST(VideoFrame,
+ TextureNoLongerNeededCallbackAfterTakingMailboxWithDestroyedFrame) {
+ uint32 called_sync_point = 0;
+
+ gpu::Mailbox mailbox;
+ mailbox.name[0] = 50;
+ uint32 sync_point = 7;
+ uint32 target = 9;
+
+ {
+ scoped_refptr<VideoFrame::MailboxHolder> mailbox_holder;
+
+ {
+ scoped_refptr<VideoFrame> frame = VideoFrame::WrapNativeTexture(
+ new VideoFrame::MailboxHolder(
+ mailbox,
+ sync_point,
+ base::Bind(&TextureCallback, &called_sync_point)),
+ target,
+ gfx::Size(10, 10), // coded_size
+ gfx::Rect(10, 10), // visible_rect
+ gfx::Size(10, 10), // natural_size
+ base::TimeDelta(), // timestamp
+ base::Callback<void(const SkBitmap&)>(), // read_pixels_cb
+ base::Closure()); // no_longer_needed_cb
+
+ mailbox_holder = frame->texture_mailbox();
+
+ EXPECT_EQ(mailbox.name[0], mailbox_holder->mailbox().name[0]);
+ EXPECT_EQ(sync_point, mailbox_holder->sync_point());
+ EXPECT_EQ(target, frame->texture_target());
+
+ // Keep a ref on the mailbox_holder after the VideoFrame is dropped.
+ }
+ EXPECT_EQ(0u, called_sync_point);
+
+ // Misuse the callback.
+ sync_point = 12;
+ mailbox_holder->Return(sync_point);
+ EXPECT_EQ(0u, called_sync_point);
+
+ // Finish using the mailbox_holder and drop our ref.
+ sync_point = 10;
+ mailbox_holder->Return(sync_point);
+ }
+ EXPECT_EQ(sync_point, called_sync_point);
+}
+
+// If a caller has taken ownership of the texture mailbox, but does
+// not call the callback, it should still happen with the original
+// sync point.
+TEST(VideoFrame,
+ TextureNoLongerNeededCallbackWhenNotCallingAndFrameDestroyed) {
+ uint32 called_sync_point = 0;
+
+ gpu::Mailbox mailbox;
+ mailbox.name[0] = 50;
+ uint32 sync_point = 7;
+ uint32 target = 9;
+
+ {
+ scoped_refptr<VideoFrame::MailboxHolder> mailbox_holder;
+
+ {
+ scoped_refptr<VideoFrame> frame = VideoFrame::WrapNativeTexture(
+ new VideoFrame::MailboxHolder(
+ mailbox,
+ sync_point,
+ base::Bind(&TextureCallback, &called_sync_point)),
+ target,
+ gfx::Size(10, 10), // coded_size
+ gfx::Rect(10, 10), // visible_rect
+ gfx::Size(10, 10), // natural_size
+ base::TimeDelta(), // timestamp
+ base::Callback<void(const SkBitmap&)>(), // read_pixels_cb
+ base::Closure()); // no_longer_needed_cb
+
+ mailbox_holder = frame->texture_mailbox();
+
+ EXPECT_EQ(mailbox.name[0], mailbox_holder->mailbox().name[0]);
+ EXPECT_EQ(sync_point, mailbox_holder->sync_point());
+ EXPECT_EQ(target, frame->texture_target());
+
+ // Destroy the video frame.
+ }
+ EXPECT_EQ(0u, called_sync_point);
+
+ // Drop the reference on the mailbox without using it at all.
+ }
+ EXPECT_EQ(sync_point, called_sync_point);
+}
+
+// If a caller has taken ownership of the texture mailbox, but does
+// not call the callback, it should still happen with the original
+// sync point.
+TEST(VideoFrame,
+ TextureNoLongerNeededCallbackAfterTakingMailboxAndNotCalling) {
+ uint32 called_sync_point = 0;
+
+ gpu::Mailbox mailbox;
+ mailbox.name[0] = 50;
+ uint32 sync_point = 7;
+ uint32 target = 9;
+
+ {
+ scoped_refptr<VideoFrame> frame = VideoFrame::WrapNativeTexture(
+ new VideoFrame::MailboxHolder(
+ mailbox,
+ sync_point,
+ base::Bind(&TextureCallback, &called_sync_point)),
+ target,
+ gfx::Size(10, 10), // coded_size
+ gfx::Rect(10, 10), // visible_rect
+ gfx::Size(10, 10), // natural_size
+ base::TimeDelta(), // timestamp
+ base::Callback<void(const SkBitmap&)>(), // read_pixels_cb
+ base::Closure()); // no_longer_needed_cb
+
+ scoped_refptr<VideoFrame::MailboxHolder> mailbox_holder =
+ frame->texture_mailbox();
+
+ EXPECT_EQ(mailbox.name[0], mailbox_holder->mailbox().name[0]);
+ EXPECT_EQ(sync_point, mailbox_holder->sync_point());
+ EXPECT_EQ(target, frame->texture_target());
+
+ EXPECT_EQ(0u, called_sync_point);
+
+ // Don't use the mailbox at all and drop our ref on it.
+ }
+ // The VideoFrame is destroyed, it should call the callback.
+ EXPECT_EQ(sync_point, called_sync_point);
+}
+
+} // namespace media
diff --git a/chromium/media/base/video_renderer.cc b/chromium/media/base/video_renderer.cc
new file mode 100644
index 00000000000..00a8f21b031
--- /dev/null
+++ b/chromium/media/base/video_renderer.cc
@@ -0,0 +1,12 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/video_renderer.h"
+
+namespace media {
+
+VideoRenderer::VideoRenderer() {}
+VideoRenderer::~VideoRenderer() {}
+
+} // namespace media
diff --git a/chromium/media/base/video_renderer.h b/chromium/media/base/video_renderer.h
new file mode 100644
index 00000000000..84ce6cfa017
--- /dev/null
+++ b/chromium/media/base/video_renderer.h
@@ -0,0 +1,97 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_VIDEO_RENDERER_H_
+#define MEDIA_BASE_VIDEO_RENDERER_H_
+
+#include "base/callback.h"
+#include "base/memory/ref_counted.h"
+#include "base/time/time.h"
+#include "media/base/media_export.h"
+#include "media/base/pipeline_status.h"
+
+namespace gfx {
+class Size;
+}
+
+namespace media {
+
+class DemuxerStream;
+class VideoDecoder;
+
+class MEDIA_EXPORT VideoRenderer {
+ public:
+ // Used to update the pipeline's clock time. The parameter is the time that
+ // the clock should not exceed.
+ typedef base::Callback<void(base::TimeDelta)> TimeCB;
+
+ // Executed when the natural size of the video has changed.
+ typedef base::Callback<void(const gfx::Size& size)> NaturalSizeChangedCB;
+
+ // Used to query the current time or duration of the media.
+ typedef base::Callback<base::TimeDelta()> TimeDeltaCB;
+
+ VideoRenderer();
+ virtual ~VideoRenderer();
+
+ // Initialize a VideoRenderer with |stream|, executing |init_cb| upon
+ // completion.
+ //
+ // |statistics_cb| is executed periodically with video rendering stats, such
+ // as dropped frames.
+ //
+ // |time_cb| is executed whenever time has advanced by way of video rendering.
+ //
+  // |size_changed_cb| is executed whenever the dimensions of the video have
+  // changed.
+ //
+ // |ended_cb| is executed when video rendering has reached the end of stream.
+ //
+ // |error_cb| is executed if an error was encountered.
+ //
+ // |get_time_cb| is used to query the current media playback time.
+ //
+ // |get_duration_cb| is used to query the media duration.
+ virtual void Initialize(DemuxerStream* stream,
+ const PipelineStatusCB& init_cb,
+ const StatisticsCB& statistics_cb,
+ const TimeCB& time_cb,
+ const NaturalSizeChangedCB& size_changed_cb,
+ const base::Closure& ended_cb,
+ const PipelineStatusCB& error_cb,
+ const TimeDeltaCB& get_time_cb,
+ const TimeDeltaCB& get_duration_cb) = 0;
+
+  // Start video decoding and rendering at the current playback rate, executing
+ // |callback| when playback is underway.
+ virtual void Play(const base::Closure& callback) = 0;
+
+ // Temporarily suspend decoding and rendering video, executing |callback| when
+ // playback has been suspended.
+ virtual void Pause(const base::Closure& callback) = 0;
+
+ // Discard any video data, executing |callback| when completed.
+ virtual void Flush(const base::Closure& callback) = 0;
+
+ // Start prerolling video data for samples starting at |time|, executing
+ // |callback| when completed.
+ //
+ // Only valid to call after a successful Initialize() or Flush().
+ virtual void Preroll(base::TimeDelta time,
+ const PipelineStatusCB& callback) = 0;
+
+ // Stop all operations in preparation for being deleted, executing |callback|
+ // when complete.
+ virtual void Stop(const base::Closure& callback) = 0;
+
+ // Updates the current playback rate.
+ virtual void SetPlaybackRate(float playback_rate) = 0;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(VideoRenderer);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_VIDEO_RENDERER_H_
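For orientation, here is a minimal sketch of a conforming implementation. NullVideoRenderer is a hypothetical name, not part of the source; a real renderer would complete these callbacks asynchronously once the underlying work finishes. The sketch only illustrates the contract that every asynchronous entry point must eventually run its completion callback.

    #include "base/callback.h"
    #include "media/base/pipeline_status.h"
    #include "media/base/video_renderer.h"

    namespace media {

    // Hypothetical do-nothing renderer: each call "completes" immediately.
    class NullVideoRenderer : public VideoRenderer {
     public:
      virtual void Initialize(DemuxerStream* stream,
                              const PipelineStatusCB& init_cb,
                              const StatisticsCB& statistics_cb,
                              const TimeCB& time_cb,
                              const NaturalSizeChangedCB& size_changed_cb,
                              const base::Closure& ended_cb,
                              const PipelineStatusCB& error_cb,
                              const TimeDeltaCB& get_time_cb,
                              const TimeDeltaCB& get_duration_cb) {
        init_cb.Run(PIPELINE_OK);
      }
      virtual void Play(const base::Closure& callback) { callback.Run(); }
      virtual void Pause(const base::Closure& callback) { callback.Run(); }
      virtual void Flush(const base::Closure& callback) { callback.Run(); }
      virtual void Preroll(base::TimeDelta time,
                           const PipelineStatusCB& callback) {
        callback.Run(PIPELINE_OK);
      }
      virtual void Stop(const base::Closure& callback) { callback.Run(); }
      virtual void SetPlaybackRate(float playback_rate) {}
    };

    }  // namespace media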
diff --git a/chromium/media/base/video_util.cc b/chromium/media/base/video_util.cc
new file mode 100644
index 00000000000..fda758efecb
--- /dev/null
+++ b/chromium/media/base/video_util.cc
@@ -0,0 +1,308 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/video_util.h"
+
+#include <cmath>
+
+#include "base/logging.h"
+#include "media/base/video_frame.h"
+#include "media/base/yuv_convert.h"
+
+namespace media {
+
+gfx::Size GetNaturalSize(const gfx::Size& visible_size,
+ int aspect_ratio_numerator,
+ int aspect_ratio_denominator) {
+ if (aspect_ratio_denominator == 0 ||
+ aspect_ratio_numerator < 0 ||
+ aspect_ratio_denominator < 0)
+ return gfx::Size();
+
+ double aspect_ratio = aspect_ratio_numerator /
+ static_cast<double>(aspect_ratio_denominator);
+
+ int width = floor(visible_size.width() * aspect_ratio + 0.5);
+ int height = visible_size.height();
+
+ // An even width makes things easier for YV12 and appears to be the behavior
+ // expected by WebKit layout tests.
+ return gfx::Size(width & ~1, height);
+}
+
+void CopyPlane(size_t plane, const uint8* source, int stride, int rows,
+ VideoFrame* frame) {
+ uint8* dest = frame->data(plane);
+ int dest_stride = frame->stride(plane);
+
+ // Clamp in case source frame has smaller stride.
+ int bytes_to_copy_per_row = std::min(frame->row_bytes(plane), stride);
+
+ // Clamp in case source frame has smaller height.
+ int rows_to_copy = std::min(frame->rows(plane), rows);
+
+ // Copy!
+ for (int row = 0; row < rows_to_copy; ++row) {
+ memcpy(dest, source, bytes_to_copy_per_row);
+ source += stride;
+ dest += dest_stride;
+ }
+}
+
+void CopyYPlane(const uint8* source, int stride, int rows, VideoFrame* frame) {
+ CopyPlane(VideoFrame::kYPlane, source, stride, rows, frame);
+}
+
+void CopyUPlane(const uint8* source, int stride, int rows, VideoFrame* frame) {
+ CopyPlane(VideoFrame::kUPlane, source, stride, rows, frame);
+}
+
+void CopyVPlane(const uint8* source, int stride, int rows, VideoFrame* frame) {
+ CopyPlane(VideoFrame::kVPlane, source, stride, rows, frame);
+}
+
+void CopyAPlane(const uint8* source, int stride, int rows, VideoFrame* frame) {
+ CopyPlane(VideoFrame::kAPlane, source, stride, rows, frame);
+}
+
+void MakeOpaqueAPlane(int stride, int rows, VideoFrame* frame) {
+ int rows_to_clear = std::min(frame->rows(VideoFrame::kAPlane), rows);
+ memset(frame->data(VideoFrame::kAPlane), 255,
+ frame->stride(VideoFrame::kAPlane) * rows_to_clear);
+}
+
+void FillYUV(VideoFrame* frame, uint8 y, uint8 u, uint8 v) {
+ // Fill the Y plane.
+ uint8* y_plane = frame->data(VideoFrame::kYPlane);
+ int y_rows = frame->rows(VideoFrame::kYPlane);
+ int y_row_bytes = frame->row_bytes(VideoFrame::kYPlane);
+ for (int i = 0; i < y_rows; ++i) {
+ memset(y_plane, y, y_row_bytes);
+ y_plane += frame->stride(VideoFrame::kYPlane);
+ }
+
+ // Fill the U and V planes.
+ uint8* u_plane = frame->data(VideoFrame::kUPlane);
+ uint8* v_plane = frame->data(VideoFrame::kVPlane);
+ int uv_rows = frame->rows(VideoFrame::kUPlane);
+ int u_row_bytes = frame->row_bytes(VideoFrame::kUPlane);
+ int v_row_bytes = frame->row_bytes(VideoFrame::kVPlane);
+ for (int i = 0; i < uv_rows; ++i) {
+ memset(u_plane, u, u_row_bytes);
+ memset(v_plane, v, v_row_bytes);
+ u_plane += frame->stride(VideoFrame::kUPlane);
+ v_plane += frame->stride(VideoFrame::kVPlane);
+ }
+}
+
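+// Fills every byte of |plane| outside |view_area| with |fill_byte|, leaving
+// the interior untouched. |view_area| is expressed in bytes within this
+// plane, i.e. the caller has already scaled it for chroma subsampling.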
+static void LetterboxPlane(VideoFrame* frame,
+ int plane,
+ const gfx::Rect& view_area,
+ uint8 fill_byte) {
+ uint8* ptr = frame->data(plane);
+ const int rows = frame->rows(plane);
+ const int row_bytes = frame->row_bytes(plane);
+ const int stride = frame->stride(plane);
+
+ CHECK_GE(stride, row_bytes);
+ CHECK_GE(view_area.x(), 0);
+ CHECK_GE(view_area.y(), 0);
+ CHECK_LE(view_area.right(), row_bytes);
+ CHECK_LE(view_area.bottom(), rows);
+
+ int y = 0;
+ for (; y < view_area.y(); y++) {
+ memset(ptr, fill_byte, row_bytes);
+ ptr += stride;
+ }
+ if (view_area.width() < row_bytes) {
+ for (; y < view_area.bottom(); y++) {
+ if (view_area.x() > 0) {
+ memset(ptr, fill_byte, view_area.x());
+ }
+ if (view_area.right() < row_bytes) {
+ memset(ptr + view_area.right(),
+ fill_byte,
+ row_bytes - view_area.right());
+ }
+ ptr += stride;
+ }
+ } else {
+ y += view_area.height();
+ ptr += stride * view_area.height();
+ }
+ for (; y < rows; y++) {
+ memset(ptr, fill_byte, row_bytes);
+ ptr += stride;
+ }
+}
+
+void LetterboxYUV(VideoFrame* frame, const gfx::Rect& view_area) {
+ DCHECK(!(view_area.x() & 1));
+ DCHECK(!(view_area.y() & 1));
+ DCHECK(!(view_area.width() & 1));
+ DCHECK(!(view_area.height() & 1));
+ DCHECK(frame->format() == VideoFrame::YV12 ||
+ frame->format() == VideoFrame::I420);
+ LetterboxPlane(frame, VideoFrame::kYPlane, view_area, 0x00);
+ gfx::Rect half_view_area(view_area.x() / 2,
+ view_area.y() / 2,
+ view_area.width() / 2,
+ view_area.height() / 2);
+ LetterboxPlane(frame, VideoFrame::kUPlane, half_view_area, 0x80);
+ LetterboxPlane(frame, VideoFrame::kVPlane, half_view_area, 0x80);
+}
+
+void RotatePlaneByPixels(
+ const uint8* src,
+ uint8* dest,
+ int width,
+ int height,
+ int rotation, // Clockwise.
+ bool flip_vert,
+ bool flip_horiz) {
+ DCHECK((width > 0) && (height > 0) &&
+ ((width & 1) == 0) && ((height & 1) == 0) &&
+ (rotation >= 0) && (rotation < 360) && (rotation % 90 == 0));
+
+ // Consolidate cases. Only 0 and 90 are left.
+ if (rotation == 180 || rotation == 270) {
+ rotation -= 180;
+ flip_vert = !flip_vert;
+ flip_horiz = !flip_horiz;
+ }
+
+ int num_rows = height;
+ int num_cols = width;
+ int src_stride = width;
+ // During pixel copying, the corresponding incremental of dest pointer
+ // when src pointer moves to next row.
+ int dest_row_step = width;
+ // During pixel copying, the corresponding incremental of dest pointer
+ // when src pointer moves to next column.
+ int dest_col_step = 1;
+
+ if (rotation == 0) {
+ if (flip_horiz) {
+ // Use pixel copying.
+ dest_col_step = -1;
+ if (flip_vert) {
+ // Rotation 180.
+ dest_row_step = -width;
+ dest += height * width - 1;
+ } else {
+ dest += width - 1;
+ }
+ } else {
+ if (flip_vert) {
+ // Fast copy by rows.
+ dest += width * (height - 1);
+ for (int row = 0; row < height; ++row) {
+ memcpy(dest, src, width);
+ src += width;
+ dest -= width;
+ }
+ } else {
+ memcpy(dest, src, width * height);
+ }
+ return;
+ }
+ } else if (rotation == 90) {
+ int offset;
+ if (width > height) {
+ offset = (width - height) / 2;
+ src += offset;
+ num_rows = num_cols = height;
+ } else {
+ offset = (height - width) / 2;
+ src += width * offset;
+ num_rows = num_cols = width;
+ }
+
+ dest_col_step = (flip_vert ? -width : width);
+ dest_row_step = (flip_horiz ? 1 : -1);
+ if (flip_horiz) {
+ if (flip_vert) {
+ dest += (width > height ? width * (height - 1) + offset :
+ width * (height - offset - 1));
+ } else {
+ dest += (width > height ? offset : width * offset);
+ }
+ } else {
+ if (flip_vert) {
+ dest += (width > height ? width * height - offset - 1 :
+ width * (height - offset) - 1);
+ } else {
+ dest += (width > height ? width - offset - 1 :
+ width * (offset + 1) - 1);
+ }
+ }
+ } else {
+ NOTREACHED();
+ }
+
+ // Copy pixels.
+ for (int row = 0; row < num_rows; ++row) {
+ const uint8* src_ptr = src;
+ uint8* dest_ptr = dest;
+ for (int col = 0; col < num_cols; ++col) {
+ *dest_ptr = *src_ptr++;
+ dest_ptr += dest_col_step;
+ }
+ src += src_stride;
+ dest += dest_row_step;
+ }
+}
+
+gfx::Rect ComputeLetterboxRegion(const gfx::Rect& bounds,
+ const gfx::Size& content) {
+ // If |content| has an undefined aspect ratio, let's not try to divide by
+ // zero.
+ if (content.IsEmpty())
+ return gfx::Rect();
+
+ int64 x = static_cast<int64>(content.width()) * bounds.height();
+ int64 y = static_cast<int64>(content.height()) * bounds.width();
+
+ gfx::Size letterbox(bounds.width(), bounds.height());
+ if (y < x)
+ letterbox.set_height(static_cast<int>(y / content.width()));
+ else
+ letterbox.set_width(static_cast<int>(x / content.height()));
+ gfx::Rect result = bounds;
+ result.ClampToCenteredSize(letterbox);
+ return result;
+}
+
+void CopyRGBToVideoFrame(const uint8* source,
+ int stride,
+ const gfx::Rect& region_in_frame,
+ VideoFrame* frame) {
+ const int kY = VideoFrame::kYPlane;
+ const int kU = VideoFrame::kUPlane;
+ const int kV = VideoFrame::kVPlane;
+ CHECK_EQ(frame->stride(kU), frame->stride(kV));
+ const int uv_stride = frame->stride(kU);
+
+ if (region_in_frame != gfx::Rect(frame->coded_size())) {
+ LetterboxYUV(frame, region_in_frame);
+ }
+
+ const int y_offset = region_in_frame.x()
+ + (region_in_frame.y() * frame->stride(kY));
+ const int uv_offset = region_in_frame.x() / 2
+ + (region_in_frame.y() / 2 * uv_stride);
+
+ ConvertRGB32ToYUV(source,
+ frame->data(kY) + y_offset,
+ frame->data(kU) + uv_offset,
+ frame->data(kV) + uv_offset,
+ region_in_frame.width(),
+ region_in_frame.height(),
+ stride,
+ frame->stride(kY),
+ uv_stride);
+}
+
+} // namespace media
diff --git a/chromium/media/base/video_util.h b/chromium/media/base/video_util.h
new file mode 100644
index 00000000000..5788015dabf
--- /dev/null
+++ b/chromium/media/base/video_util.h
@@ -0,0 +1,89 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_VIDEO_UTIL_H_
+#define MEDIA_BASE_VIDEO_UTIL_H_
+
+#include "base/basictypes.h"
+#include "media/base/media_export.h"
+#include "ui/gfx/rect.h"
+#include "ui/gfx/size.h"
+
+namespace media {
+
+class VideoFrame;
+
+// Computes the natural (display) size of |visible_size| for the given pixel
+// aspect ratio.
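+// For example (illustrative numbers, not from the source): a 720x480
+// visible size with a 10:11 pixel aspect ratio gives a width of
+// floor(720 * 10 / 11 + 0.5) = 655, which is rounded down to the even value
+// 654, i.e. a natural size of 654x480.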
+MEDIA_EXPORT gfx::Size GetNaturalSize(const gfx::Size& visible_size,
+ int aspect_ratio_numerator,
+ int aspect_ratio_denominator);
+
+// Copies a plane of a YUV(A) source into a VideoFrame object, taking into
+// account source and destination dimensions.
+//
+// NOTE: rows is *not* the same as height!
+MEDIA_EXPORT void CopyYPlane(const uint8* source, int stride, int rows,
+ VideoFrame* frame);
+MEDIA_EXPORT void CopyUPlane(const uint8* source, int stride, int rows,
+ VideoFrame* frame);
+MEDIA_EXPORT void CopyVPlane(const uint8* source, int stride, int rows,
+ VideoFrame* frame);
+MEDIA_EXPORT void CopyAPlane(const uint8* source, int stride, int rows,
+ VideoFrame* frame);
+
+// Sets alpha plane values to be completely opaque (all 255's).
+MEDIA_EXPORT void MakeOpaqueAPlane(int stride, int rows, VideoFrame* frame);
+
+// |plane| is one of VideoFrame::kYPlane, VideoFrame::kUPlane,
+// VideoFrame::kVPlane or VideoFrame::kAPlane
+MEDIA_EXPORT void CopyPlane(size_t plane, const uint8* source, int stride,
+ int rows, VideoFrame* frame);
+
+// Fills |frame|, which must contain YUV data, with the given color values.
+MEDIA_EXPORT void FillYUV(VideoFrame* frame, uint8 y, uint8 u, uint8 v);
+
+// Creates a border in |frame| such that all pixels outside of
+// |view_area| are black. The size and position of |view_area|
+// must be even to align correctly with the color planes.
+// Only YV12 and I420 format video frames are currently supported.
+MEDIA_EXPORT void LetterboxYUV(VideoFrame* frame,
+ const gfx::Rect& view_area);
+
+// Rotates the |src| plane by |rotation| degrees, with optional vertical and
+// horizontal flipping.
+// |rotation| is limited to {0, 90, 180, 270}.
+// |width| and |height| are expected to be even numbers.
+// Both |src| and |dest| planes are packed and have the same |width| and
+// |height|.
+// When |width| != |height| and rotated by 90/270, only the maximum square
+// portion located in the center is rotated. For example, for width=640 and
+// height=480, the rotated area is 480x480 located from row 0 through 479 and
+// from column 80 through 559. The leftmost and rightmost 80 columns are
+// ignored for both |src| and |dest|.
+// The caller is responsible for blanking out the margin area.
+MEDIA_EXPORT void RotatePlaneByPixels(
+ const uint8* src,
+ uint8* dest,
+ int width,
+ int height,
+ int rotation, // Clockwise.
+ bool flip_vert,
+ bool flip_horiz);
+
+// Returns the largest centered rectangle with the same aspect ratio as
+// |content| that fits entirely inside |bounds|. If |content| is empty, its
+// aspect ratio is undefined, so an empty Rect is returned.
+MEDIA_EXPORT gfx::Rect ComputeLetterboxRegion(const gfx::Rect& bounds,
+ const gfx::Size& content);
+
+// Copy an RGB bitmap into the specified |region_in_frame| of a YUV video frame.
+// Fills the regions outside |region_in_frame| with black.
+MEDIA_EXPORT void CopyRGBToVideoFrame(const uint8* source,
+ int stride,
+ const gfx::Rect& region_in_frame,
+ VideoFrame* frame);
+
+} // namespace media
+
+#endif // MEDIA_BASE_VIDEO_UTIL_H_
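A short usage sketch, under stated assumptions (the variable names are hypothetical, and the bitmap contents are left uninitialized for brevity), showing how ComputeLetterboxRegion() and CopyRGBToVideoFrame() compose:

    #include "base/memory/scoped_ptr.h"
    #include "media/base/video_frame.h"
    #include "media/base/video_util.h"

    // Letterbox a 16:9 ARGB bitmap into a square 640x640 YV12 frame.
    gfx::Size canvas(640, 640);
    scoped_refptr<media::VideoFrame> frame = media::VideoFrame::CreateFrame(
        media::VideoFrame::YV12, canvas, gfx::Rect(canvas), canvas,
        base::TimeDelta());
    gfx::Rect region = media::ComputeLetterboxRegion(gfx::Rect(canvas),
                                                     gfx::Size(1280, 720));
    // region is now (0, 140, 640, 360): centered, 16:9, and even-aligned as
    // LetterboxYUV() requires.
    // |rgb_pixels| stands in for a 640x360 ARGB bitmap supplied by the caller.
    const int rgb_stride = 640 * 4;
    scoped_ptr<uint8[]> rgb_pixels(new uint8[rgb_stride * 360]);
    // Pixels outside |region| are painted black.
    media::CopyRGBToVideoFrame(rgb_pixels.get(), rgb_stride, region,
                               frame.get());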
diff --git a/chromium/media/base/video_util_unittest.cc b/chromium/media/base/video_util_unittest.cc
new file mode 100644
index 00000000000..4f4ee3186a4
--- /dev/null
+++ b/chromium/media/base/video_util_unittest.cc
@@ -0,0 +1,391 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/scoped_ptr.h"
+#include "media/base/video_frame.h"
+#include "media/base/video_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+class VideoUtilTest : public testing::Test {
+ public:
+ VideoUtilTest()
+ : height_(0),
+ y_stride_(0),
+ u_stride_(0),
+ v_stride_(0) {
+ }
+
+ virtual ~VideoUtilTest() {}
+
+ void CreateSourceFrame(int width, int height,
+ int y_stride, int u_stride, int v_stride) {
+ EXPECT_GE(y_stride, width);
+ EXPECT_GE(u_stride, width / 2);
+ EXPECT_GE(v_stride, width / 2);
+
+ height_ = height;
+ y_stride_ = y_stride;
+ u_stride_ = u_stride;
+ v_stride_ = v_stride;
+
+ y_plane_.reset(new uint8[y_stride * height]);
+ u_plane_.reset(new uint8[u_stride * height / 2]);
+ v_plane_.reset(new uint8[v_stride * height / 2]);
+ }
+
+ void CreateDestinationFrame(int width, int height) {
+ gfx::Size size(width, height);
+ destination_frame_ =
+ VideoFrame::CreateFrame(VideoFrame::YV12, size, gfx::Rect(size), size,
+ base::TimeDelta());
+ }
+
+ void CopyPlanes() {
+ CopyYPlane(y_plane_.get(), y_stride_, height_, destination_frame_.get());
+ CopyUPlane(
+ u_plane_.get(), u_stride_, height_ / 2, destination_frame_.get());
+ CopyVPlane(
+ v_plane_.get(), v_stride_, height_ / 2, destination_frame_.get());
+ }
+
+ private:
+ scoped_ptr<uint8[]> y_plane_;
+ scoped_ptr<uint8[]> u_plane_;
+ scoped_ptr<uint8[]> v_plane_;
+
+ int height_;
+ int y_stride_;
+ int u_stride_;
+ int v_stride_;
+
+ scoped_refptr<VideoFrame> destination_frame_;
+
+ DISALLOW_COPY_AND_ASSIGN(VideoUtilTest);
+};
+
+TEST_F(VideoUtilTest, CopyPlane_Exact) {
+ CreateSourceFrame(16, 16, 16, 8, 8);
+ CreateDestinationFrame(16, 16);
+ CopyPlanes();
+}
+
+TEST_F(VideoUtilTest, CopyPlane_SmallerSource) {
+ CreateSourceFrame(8, 8, 8, 4, 4);
+ CreateDestinationFrame(16, 16);
+ CopyPlanes();
+}
+
+TEST_F(VideoUtilTest, CopyPlane_SmallerDestination) {
+ CreateSourceFrame(16, 16, 16, 8, 8);
+ CreateDestinationFrame(8, 8);
+ CopyPlanes();
+}
+
+namespace {
+
+uint8 src6x4[] = {
+ 0, 1, 2, 3, 4, 5,
+ 6, 7, 8, 9, 10, 11,
+ 12, 13, 14, 15, 16, 17,
+ 18, 19, 20, 21, 22, 23
+};
+
+// Target images, name pattern target_rotation_flipV_flipH.
+uint8* target6x4_0_n_n = src6x4;
+
+uint8 target6x4_0_n_y[] = {
+ 5, 4, 3, 2, 1, 0,
+ 11, 10, 9, 8, 7, 6,
+ 17, 16, 15, 14, 13, 12,
+ 23, 22, 21, 20, 19, 18
+};
+
+uint8 target6x4_0_y_n[] = {
+ 18, 19, 20, 21, 22, 23,
+ 12, 13, 14, 15, 16, 17,
+ 6, 7, 8, 9, 10, 11,
+ 0, 1, 2, 3, 4, 5
+};
+
+uint8 target6x4_0_y_y[] = {
+ 23, 22, 21, 20, 19, 18,
+ 17, 16, 15, 14, 13, 12,
+ 11, 10, 9, 8, 7, 6,
+ 5, 4, 3, 2, 1, 0
+};
+
+uint8 target6x4_90_n_n[] = {
+ 255, 19, 13, 7, 1, 255,
+ 255, 20, 14, 8, 2, 255,
+ 255, 21, 15, 9, 3, 255,
+ 255, 22, 16, 10, 4, 255
+};
+
+uint8 target6x4_90_n_y[] = {
+ 255, 1, 7, 13, 19, 255,
+ 255, 2, 8, 14, 20, 255,
+ 255, 3, 9, 15, 21, 255,
+ 255, 4, 10, 16, 22, 255
+};
+
+uint8 target6x4_90_y_n[] = {
+ 255, 22, 16, 10, 4, 255,
+ 255, 21, 15, 9, 3, 255,
+ 255, 20, 14, 8, 2, 255,
+ 255, 19, 13, 7, 1, 255
+};
+
+uint8 target6x4_90_y_y[] = {
+ 255, 4, 10, 16, 22, 255,
+ 255, 3, 9, 15, 21, 255,
+ 255, 2, 8, 14, 20, 255,
+ 255, 1, 7, 13, 19, 255
+};
+
+uint8* target6x4_180_n_n = target6x4_0_y_y;
+uint8* target6x4_180_n_y = target6x4_0_y_n;
+uint8* target6x4_180_y_n = target6x4_0_n_y;
+uint8* target6x4_180_y_y = target6x4_0_n_n;
+
+uint8* target6x4_270_n_n = target6x4_90_y_y;
+uint8* target6x4_270_n_y = target6x4_90_y_n;
+uint8* target6x4_270_y_n = target6x4_90_n_y;
+uint8* target6x4_270_y_y = target6x4_90_n_n;
+
+uint8 src4x6[] = {
+ 0, 1, 2, 3,
+ 4, 5, 6, 7,
+ 8, 9, 10, 11,
+ 12, 13, 14, 15,
+ 16, 17, 18, 19,
+ 20, 21, 22, 23
+};
+
+uint8* target4x6_0_n_n = src4x6;
+
+uint8 target4x6_0_n_y[] = {
+ 3, 2, 1, 0,
+ 7, 6, 5, 4,
+ 11, 10, 9, 8,
+ 15, 14, 13, 12,
+ 19, 18, 17, 16,
+ 23, 22, 21, 20
+};
+
+uint8 target4x6_0_y_n[] = {
+ 20, 21, 22, 23,
+ 16, 17, 18, 19,
+ 12, 13, 14, 15,
+ 8, 9, 10, 11,
+ 4, 5, 6, 7,
+ 0, 1, 2, 3
+};
+
+uint8 target4x6_0_y_y[] = {
+ 23, 22, 21, 20,
+ 19, 18, 17, 16,
+ 15, 14, 13, 12,
+ 11, 10, 9, 8,
+ 7, 6, 5, 4,
+ 3, 2, 1, 0
+};
+
+uint8 target4x6_90_n_n[] = {
+ 255, 255, 255, 255,
+ 16, 12, 8, 4,
+ 17, 13, 9, 5,
+ 18, 14, 10, 6,
+ 19, 15, 11, 7,
+ 255, 255, 255, 255
+};
+
+uint8 target4x6_90_n_y[] = {
+ 255, 255, 255, 255,
+ 4, 8, 12, 16,
+ 5, 9, 13, 17,
+ 6, 10, 14, 18,
+ 7, 11, 15, 19,
+ 255, 255, 255, 255
+};
+
+uint8 target4x6_90_y_n[] = {
+ 255, 255, 255, 255,
+ 19, 15, 11, 7,
+ 18, 14, 10, 6,
+ 17, 13, 9, 5,
+ 16, 12, 8, 4,
+ 255, 255, 255, 255
+};
+
+uint8 target4x6_90_y_y[] = {
+ 255, 255, 255, 255,
+ 7, 11, 15, 19,
+ 6, 10, 14, 18,
+ 5, 9, 13, 17,
+ 4, 8, 12, 16,
+ 255, 255, 255, 255
+};
+
+uint8* target4x6_180_n_n = target4x6_0_y_y;
+uint8* target4x6_180_n_y = target4x6_0_y_n;
+uint8* target4x6_180_y_n = target4x6_0_n_y;
+uint8* target4x6_180_y_y = target4x6_0_n_n;
+
+uint8* target4x6_270_n_n = target4x6_90_y_y;
+uint8* target4x6_270_n_y = target4x6_90_y_n;
+uint8* target4x6_270_y_n = target4x6_90_n_y;
+uint8* target4x6_270_y_y = target4x6_90_n_n;
+
+struct VideoRotationTestData {
+ uint8* src;
+ uint8* target;
+ int width;
+ int height;
+ int rotation;
+ bool flip_vert;
+ bool flip_horiz;
+};
+
+const VideoRotationTestData kVideoRotationTestData[] = {
+ { src6x4, target6x4_0_n_n, 6, 4, 0, false, false },
+ { src6x4, target6x4_0_n_y, 6, 4, 0, false, true },
+ { src6x4, target6x4_0_y_n, 6, 4, 0, true, false },
+ { src6x4, target6x4_0_y_y, 6, 4, 0, true, true },
+
+ { src6x4, target6x4_90_n_n, 6, 4, 90, false, false },
+ { src6x4, target6x4_90_n_y, 6, 4, 90, false, true },
+ { src6x4, target6x4_90_y_n, 6, 4, 90, true, false },
+ { src6x4, target6x4_90_y_y, 6, 4, 90, true, true },
+
+ { src6x4, target6x4_180_n_n, 6, 4, 180, false, false },
+ { src6x4, target6x4_180_n_y, 6, 4, 180, false, true },
+ { src6x4, target6x4_180_y_n, 6, 4, 180, true, false },
+ { src6x4, target6x4_180_y_y, 6, 4, 180, true, true },
+
+ { src6x4, target6x4_270_n_n, 6, 4, 270, false, false },
+ { src6x4, target6x4_270_n_y, 6, 4, 270, false, true },
+ { src6x4, target6x4_270_y_n, 6, 4, 270, true, false },
+ { src6x4, target6x4_270_y_y, 6, 4, 270, true, true },
+
+ { src4x6, target4x6_0_n_n, 4, 6, 0, false, false },
+ { src4x6, target4x6_0_n_y, 4, 6, 0, false, true },
+ { src4x6, target4x6_0_y_n, 4, 6, 0, true, false },
+ { src4x6, target4x6_0_y_y, 4, 6, 0, true, true },
+
+ { src4x6, target4x6_90_n_n, 4, 6, 90, false, false },
+ { src4x6, target4x6_90_n_y, 4, 6, 90, false, true },
+ { src4x6, target4x6_90_y_n, 4, 6, 90, true, false },
+ { src4x6, target4x6_90_y_y, 4, 6, 90, true, true },
+
+ { src4x6, target4x6_180_n_n, 4, 6, 180, false, false },
+ { src4x6, target4x6_180_n_y, 4, 6, 180, false, true },
+ { src4x6, target4x6_180_y_n, 4, 6, 180, true, false },
+ { src4x6, target4x6_180_y_y, 4, 6, 180, true, true },
+
+ { src4x6, target4x6_270_n_n, 4, 6, 270, false, false },
+ { src4x6, target4x6_270_n_y, 4, 6, 270, false, true },
+ { src4x6, target4x6_270_y_n, 4, 6, 270, true, false },
+ { src4x6, target4x6_270_y_y, 4, 6, 270, true, true }
+};
+
+} // namespace
+
+class VideoUtilRotationTest
+ : public testing::TestWithParam<VideoRotationTestData> {
+ public:
+ VideoUtilRotationTest() {
+ dest_.reset(new uint8[GetParam().width * GetParam().height]);
+ }
+
+ virtual ~VideoUtilRotationTest() {}
+
+ uint8* dest_plane() { return dest_.get(); }
+
+ private:
+ scoped_ptr<uint8[]> dest_;
+
+ DISALLOW_COPY_AND_ASSIGN(VideoUtilRotationTest);
+};
+
+TEST_P(VideoUtilRotationTest, Rotate) {
+ int rotation = GetParam().rotation;
+ EXPECT_TRUE((rotation >= 0) && (rotation < 360) && (rotation % 90 == 0));
+
+ int size = GetParam().width * GetParam().height;
+ uint8* dest = dest_plane();
+ memset(dest, 255, size);
+
+ RotatePlaneByPixels(GetParam().src, dest, GetParam().width,
+ GetParam().height, rotation,
+ GetParam().flip_vert, GetParam().flip_horiz);
+
+ EXPECT_EQ(memcmp(dest, GetParam().target, size), 0);
+}
+
+INSTANTIATE_TEST_CASE_P(, VideoUtilRotationTest,
+ testing::ValuesIn(kVideoRotationTestData));
+
+TEST_F(VideoUtilTest, ComputeLetterboxRegion) {
+ EXPECT_EQ(gfx::Rect(167, 0, 666, 500),
+ ComputeLetterboxRegion(gfx::Rect(0, 0, 1000, 500),
+ gfx::Size(640, 480)));
+ EXPECT_EQ(gfx::Rect(0, 312, 500, 375),
+ ComputeLetterboxRegion(gfx::Rect(0, 0, 500, 1000),
+ gfx::Size(640, 480)));
+ EXPECT_EQ(gfx::Rect(56, 0, 888, 500),
+ ComputeLetterboxRegion(gfx::Rect(0, 0, 1000, 500),
+ gfx::Size(1920, 1080)));
+ EXPECT_EQ(gfx::Rect(0, 12, 100, 75),
+ ComputeLetterboxRegion(gfx::Rect(0, 0, 100, 100),
+ gfx::Size(400, 300)));
+ EXPECT_EQ(gfx::Rect(0, 250000000, 2000000000, 1500000000),
+ ComputeLetterboxRegion(gfx::Rect(0, 0, 2000000000, 2000000000),
+ gfx::Size(40000, 30000)));
+ EXPECT_TRUE(ComputeLetterboxRegion(gfx::Rect(0, 0, 2000000000, 2000000000),
+ gfx::Size(0, 0)).IsEmpty());
+}
+
+TEST_F(VideoUtilTest, LetterboxYUV) {
+ int width = 40;
+ int height = 30;
+ gfx::Size size(width, height);
+ scoped_refptr<VideoFrame> frame(
+ VideoFrame::CreateFrame(VideoFrame::YV12, size, gfx::Rect(size), size,
+ base::TimeDelta()));
+
+ for (int left_margin = 0; left_margin <= 10; left_margin += 10) {
+ for (int right_margin = 0; right_margin <= 10; right_margin += 10) {
+ for (int top_margin = 0; top_margin <= 10; top_margin += 10) {
+ for (int bottom_margin = 0; bottom_margin <= 10; bottom_margin += 10) {
+ gfx::Rect view_area(left_margin, top_margin,
+ width - left_margin - right_margin,
+ height - top_margin - bottom_margin);
+ FillYUV(frame.get(), 0x1, 0x2, 0x3);
+ LetterboxYUV(frame.get(), view_area);
+ for (int x = 0; x < width; x++) {
+ for (int y = 0; y < height; y++) {
+ bool inside = x >= view_area.x() &&
+ x < view_area.x() + view_area.width() &&
+ y >= view_area.y() &&
+ y < view_area.y() + view_area.height();
+ EXPECT_EQ(frame->data(VideoFrame::kYPlane)[
+ y * frame->stride(VideoFrame::kYPlane) + x],
+ inside ? 0x01 : 0x00);
+ EXPECT_EQ(frame->data(VideoFrame::kUPlane)[
+ (y / 2) * frame->stride(VideoFrame::kUPlane) + (x / 2)],
+ inside ? 0x02 : 0x80);
+ EXPECT_EQ(frame->data(VideoFrame::kVPlane)[
+ (y / 2) * frame->stride(VideoFrame::kVPlane) + (x / 2)],
+ inside ? 0x03 : 0x80);
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+} // namespace media
diff --git a/chromium/media/base/yuv_convert.cc b/chromium/media/base/yuv_convert.cc
new file mode 100644
index 00000000000..893b53df147
--- /dev/null
+++ b/chromium/media/base/yuv_convert.cc
@@ -0,0 +1,654 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This webpage shows the layout of YV12 and other YUV formats:
+// http://www.fourcc.org/yuv.php
+// The actual conversion is best described here:
+// http://en.wikipedia.org/wiki/YUV
+// An article on optimizing YUV conversion using tables instead of multiplies:
+// http://lestourtereaux.free.fr/papers/data/yuvrgb.pdf
+//
+// YV12 has a full plane of Y and half-height, half-width chroma planes.
+// YV16 has a full plane of Y and full-height, half-width chroma planes.
+//
+// The output is ARGB pixel format, which on little-endian machines is stored
+// as BGRA. The alpha is set to 255, allowing the application to use RGBA or
+// RGB32.
+
+#include "media/base/yuv_convert.h"
+
+#include "base/cpu.h"
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "build/build_config.h"
+#include "media/base/simd/convert_rgb_to_yuv.h"
+#include "media/base/simd/convert_yuv_to_rgb.h"
+#include "media/base/simd/filter_yuv.h"
+
+#if defined(ARCH_CPU_X86_FAMILY)
+#if defined(COMPILER_MSVC)
+#include <intrin.h>
+#else
+#include <mmintrin.h>
+#endif
+#endif
+
+// Assembly functions are declared without namespace.
+extern "C" { void EmptyRegisterState_MMX(); } // extern "C"
+
+namespace media {
+
+typedef void (*FilterYUVRowsProc)(uint8*, const uint8*, const uint8*, int, int);
+
+typedef void (*ConvertRGBToYUVProc)(const uint8*,
+ uint8*,
+ uint8*,
+ uint8*,
+ int,
+ int,
+ int,
+ int,
+ int);
+
+typedef void (*ConvertYUVToRGB32Proc)(const uint8*,
+ const uint8*,
+ const uint8*,
+ uint8*,
+ int,
+ int,
+ int,
+ int,
+ int,
+ YUVType);
+
+typedef void (*ConvertYUVAToARGBProc)(const uint8*,
+ const uint8*,
+ const uint8*,
+ const uint8*,
+ uint8*,
+ int,
+ int,
+ int,
+ int,
+ int,
+ int,
+ YUVType);
+
+typedef void (*ConvertYUVToRGB32RowProc)(const uint8*,
+ const uint8*,
+ const uint8*,
+ uint8*,
+ ptrdiff_t);
+
+typedef void (*ConvertYUVAToARGBRowProc)(const uint8*,
+ const uint8*,
+ const uint8*,
+ const uint8*,
+ uint8*,
+ ptrdiff_t);
+
+typedef void (*ScaleYUVToRGB32RowProc)(const uint8*,
+ const uint8*,
+ const uint8*,
+ uint8*,
+ ptrdiff_t,
+ ptrdiff_t);
+
+static FilterYUVRowsProc g_filter_yuv_rows_proc_ = NULL;
+static ConvertYUVToRGB32RowProc g_convert_yuv_to_rgb32_row_proc_ = NULL;
+static ScaleYUVToRGB32RowProc g_scale_yuv_to_rgb32_row_proc_ = NULL;
+static ScaleYUVToRGB32RowProc g_linear_scale_yuv_to_rgb32_row_proc_ = NULL;
+static ConvertRGBToYUVProc g_convert_rgb32_to_yuv_proc_ = NULL;
+static ConvertRGBToYUVProc g_convert_rgb24_to_yuv_proc_ = NULL;
+static ConvertYUVToRGB32Proc g_convert_yuv_to_rgb32_proc_ = NULL;
+static ConvertYUVAToARGBProc g_convert_yuva_to_argb_proc_ = NULL;
+
+// Empty SIMD registers state after using them.
+void EmptyRegisterStateStub() {}
+#if defined(MEDIA_MMX_INTRINSICS_AVAILABLE)
+void EmptyRegisterStateIntrinsic() { _mm_empty(); }
+#endif
+typedef void (*EmptyRegisterStateProc)();
+static EmptyRegisterStateProc g_empty_register_state_proc_ = NULL;
+
+void InitializeCPUSpecificYUVConversions() {
+ CHECK(!g_filter_yuv_rows_proc_);
+ CHECK(!g_convert_yuv_to_rgb32_row_proc_);
+ CHECK(!g_scale_yuv_to_rgb32_row_proc_);
+ CHECK(!g_linear_scale_yuv_to_rgb32_row_proc_);
+ CHECK(!g_convert_rgb32_to_yuv_proc_);
+ CHECK(!g_convert_rgb24_to_yuv_proc_);
+ CHECK(!g_convert_yuv_to_rgb32_proc_);
+ CHECK(!g_convert_yuva_to_argb_proc_);
+ CHECK(!g_empty_register_state_proc_);
+
+ g_filter_yuv_rows_proc_ = FilterYUVRows_C;
+ g_convert_yuv_to_rgb32_row_proc_ = ConvertYUVToRGB32Row_C;
+ g_scale_yuv_to_rgb32_row_proc_ = ScaleYUVToRGB32Row_C;
+ g_linear_scale_yuv_to_rgb32_row_proc_ = LinearScaleYUVToRGB32Row_C;
+ g_convert_rgb32_to_yuv_proc_ = ConvertRGB32ToYUV_C;
+ g_convert_rgb24_to_yuv_proc_ = ConvertRGB24ToYUV_C;
+ g_convert_yuv_to_rgb32_proc_ = ConvertYUVToRGB32_C;
+ g_convert_yuva_to_argb_proc_ = ConvertYUVAToARGB_C;
+ g_empty_register_state_proc_ = EmptyRegisterStateStub;
+
+#if defined(ARCH_CPU_X86_FAMILY)
+ base::CPU cpu;
+ if (cpu.has_mmx()) {
+ g_convert_yuv_to_rgb32_row_proc_ = ConvertYUVToRGB32Row_MMX;
+ g_scale_yuv_to_rgb32_row_proc_ = ScaleYUVToRGB32Row_MMX;
+ g_convert_yuv_to_rgb32_proc_ = ConvertYUVToRGB32_MMX;
+ g_convert_yuva_to_argb_proc_ = ConvertYUVAToARGB_MMX;
+ g_linear_scale_yuv_to_rgb32_row_proc_ = LinearScaleYUVToRGB32Row_MMX;
+
+#if defined(MEDIA_MMX_INTRINSICS_AVAILABLE)
+ g_filter_yuv_rows_proc_ = FilterYUVRows_MMX;
+ g_empty_register_state_proc_ = EmptyRegisterStateIntrinsic;
+#else
+ g_empty_register_state_proc_ = EmptyRegisterState_MMX;
+#endif
+ }
+
+ if (cpu.has_sse()) {
+ g_convert_yuv_to_rgb32_row_proc_ = ConvertYUVToRGB32Row_SSE;
+ g_scale_yuv_to_rgb32_row_proc_ = ScaleYUVToRGB32Row_SSE;
+ g_linear_scale_yuv_to_rgb32_row_proc_ = LinearScaleYUVToRGB32Row_SSE;
+ g_convert_yuv_to_rgb32_proc_ = ConvertYUVToRGB32_SSE;
+ }
+
+ if (cpu.has_sse2()) {
+ g_filter_yuv_rows_proc_ = FilterYUVRows_SSE2;
+ g_convert_rgb32_to_yuv_proc_ = ConvertRGB32ToYUV_SSE2;
+
+#if defined(ARCH_CPU_X86_64)
+ g_scale_yuv_to_rgb32_row_proc_ = ScaleYUVToRGB32Row_SSE2_X64;
+
+ // Technically this should be in the MMX section, but MSVC will optimize out
+ // the export of LinearScaleYUVToRGB32Row_MMX, which is required by the unit
+ // tests, if that decision can be made at compile time. Since all X64 CPUs
+ // have SSE2, we can hack around this by making the selection here.
+ g_linear_scale_yuv_to_rgb32_row_proc_ = LinearScaleYUVToRGB32Row_MMX_X64;
+#endif
+ }
+
+ if (cpu.has_ssse3()) {
+ g_convert_rgb24_to_yuv_proc_ = &ConvertRGB24ToYUV_SSSE3;
+
+ // TODO(hclam): Add ConvertRGB32ToYUV_SSSE3 when the cyan problem is solved.
+ // See: crbug.com/100462
+ }
+#endif
+}
+
+// Empty SIMD registers state after using them.
+void EmptyRegisterState() { g_empty_register_state_proc_(); }
+
+// 16.16 fixed point arithmetic
+const int kFractionBits = 16;
+const int kFractionMax = 1 << kFractionBits;
+const int kFractionMask = ((1 << kFractionBits) - 1);
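+// (In 16.16 fixed point, 1.0 is kFractionMax == 65536; e.g. scaling a
+// 1280-pixel-wide source down to 640 pixels stores source_dx == 131072,
+// i.e. two source pixels advanced per destination pixel.)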
+
+// Scale a frame of YUV to 32 bit ARGB.
+void ScaleYUVToRGB32(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* rgb_buf,
+ int source_width,
+ int source_height,
+ int width,
+ int height,
+ int y_pitch,
+ int uv_pitch,
+ int rgb_pitch,
+ YUVType yuv_type,
+ Rotate view_rotate,
+ ScaleFilter filter) {
+ // Handle zero sized sources and destinations.
+ if ((yuv_type == YV12 && (source_width < 2 || source_height < 2)) ||
+ (yuv_type == YV16 && (source_width < 2 || source_height < 1)) ||
+ width == 0 || height == 0)
+ return;
+
+ // 4096 allows 3 buffers to fit in 12K, which helps performance on CPUs with
+ // a 16K L1 cache. It is large enough for 3840x2160 sources and for 30"
+ // displays, which are 2560x1600.
+ const int kFilterBufferSize = 4096;
+ // Disable filtering if the screen is too big (to avoid buffer overflows).
+ // This should never happen to regular users: they don't have monitors
+ // wider than 4096 pixels.
+ // TODO(fbarchard): Allow rotated videos to filter.
+ if (source_width > kFilterBufferSize || view_rotate)
+ filter = FILTER_NONE;
+
+ unsigned int y_shift = yuv_type;
+ // Diagram showing origin and direction of source sampling.
+ // ->0 4<-
+ // 7 3
+ //
+ // 6 5
+ // ->1 2<-
+ // Rotations that start at right side of image.
+ if ((view_rotate == ROTATE_180) || (view_rotate == ROTATE_270) ||
+ (view_rotate == MIRROR_ROTATE_0) || (view_rotate == MIRROR_ROTATE_90)) {
+ y_buf += source_width - 1;
+ u_buf += source_width / 2 - 1;
+ v_buf += source_width / 2 - 1;
+ source_width = -source_width;
+ }
+ // Rotations that start at bottom of image.
+ if ((view_rotate == ROTATE_90) || (view_rotate == ROTATE_180) ||
+ (view_rotate == MIRROR_ROTATE_90) || (view_rotate == MIRROR_ROTATE_180)) {
+ y_buf += (source_height - 1) * y_pitch;
+ u_buf += ((source_height >> y_shift) - 1) * uv_pitch;
+ v_buf += ((source_height >> y_shift) - 1) * uv_pitch;
+ source_height = -source_height;
+ }
+
+ int source_dx = source_width * kFractionMax / width;
+
+ if ((view_rotate == ROTATE_90) || (view_rotate == ROTATE_270)) {
+ int tmp = height;
+ height = width;
+ width = tmp;
+ tmp = source_height;
+ source_height = source_width;
+ source_width = tmp;
+ int source_dy = source_height * kFractionMax / height;
+ source_dx = ((source_dy >> kFractionBits) * y_pitch) << kFractionBits;
+ if (view_rotate == ROTATE_90) {
+ y_pitch = -1;
+ uv_pitch = -1;
+ source_height = -source_height;
+ } else {
+ y_pitch = 1;
+ uv_pitch = 1;
+ }
+ }
+
+ // Padding is needed because the SSE2 version of FilterRows() will write
+ // 1 to 16 extra pixels past the end of the row.
+ uint8 yuvbuf[16 + kFilterBufferSize * 3 + 16];
+ uint8* ybuf =
+ reinterpret_cast<uint8*>(reinterpret_cast<uintptr_t>(yuvbuf + 15) & ~15);
+ uint8* ubuf = ybuf + kFilterBufferSize;
+ uint8* vbuf = ubuf + kFilterBufferSize;
+
+ // TODO(fbarchard): Fixed point math is off by 1 on negatives.
+
+ // We take a y-coordinate in [0,1] space in the source image space, and
+ // transform to a y-coordinate in [0,1] space in the destination image space.
+ // Note that the coordinate endpoints lie on pixel boundaries, not on pixel
+ // centers: e.g. a two-pixel-high image will have pixel centers at 0.25 and
+ // 0.75. The formula is as follows (in fixed-point arithmetic):
+ // y_dst = dst_height * ((y_src + 0.5) / src_height)
+ // dst_pixel = clamp([0, dst_height - 1], floor(y_dst - 0.5))
+ // Implement this here as an accumulator + delta, to avoid expensive math
+ // in the loop.
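+ // (For example, scaling 4 source rows to 2 destination rows gives, in
+ // 16.16 terms, delta == 2.0 and an initial accumulator of 0.5, so the two
+ // destination rows sample source rows 0.5 and 2.5.)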
+ int source_y_subpixel_accum =
+ ((kFractionMax / 2) * source_height) / height - (kFractionMax / 2);
+ int source_y_subpixel_delta = ((1 << kFractionBits) * source_height) / height;
+
+ // TODO(fbarchard): Split this into separate function for better efficiency.
+ for (int y = 0; y < height; ++y) {
+ uint8* dest_pixel = rgb_buf + y * rgb_pitch;
+ int source_y_subpixel = source_y_subpixel_accum;
+ source_y_subpixel_accum += source_y_subpixel_delta;
+ if (source_y_subpixel < 0)
+ source_y_subpixel = 0;
+ else if (source_y_subpixel > ((source_height - 1) << kFractionBits))
+ source_y_subpixel = (source_height - 1) << kFractionBits;
+
+ const uint8* y_ptr = NULL;
+ const uint8* u_ptr = NULL;
+ const uint8* v_ptr = NULL;
+ // Apply vertical filtering if necessary.
+ // TODO(fbarchard): Remove memcpy when not necessary.
+ if (filter & media::FILTER_BILINEAR_V) {
+ int source_y = source_y_subpixel >> kFractionBits;
+ y_ptr = y_buf + source_y * y_pitch;
+ u_ptr = u_buf + (source_y >> y_shift) * uv_pitch;
+ v_ptr = v_buf + (source_y >> y_shift) * uv_pitch;
+
+ // Vertical scaler uses 16.8 fixed point.
+ int source_y_fraction = (source_y_subpixel & kFractionMask) >> 8;
+ if (source_y_fraction != 0) {
+ g_filter_yuv_rows_proc_(
+ ybuf, y_ptr, y_ptr + y_pitch, source_width, source_y_fraction);
+ } else {
+ memcpy(ybuf, y_ptr, source_width);
+ }
+ y_ptr = ybuf;
+ ybuf[source_width] = ybuf[source_width - 1];
+
+ int uv_source_width = (source_width + 1) / 2;
+ int source_uv_fraction;
+
+ // For formats with half-height UV planes, each even-numbered pixel row
+ // should not interpolate, since the next row to interpolate from should
+ // be a duplicate of the current row.
+ if (y_shift && (source_y & 0x1) == 0)
+ source_uv_fraction = 0;
+ else
+ source_uv_fraction = source_y_fraction;
+
+ if (source_uv_fraction != 0) {
+ g_filter_yuv_rows_proc_(
+ ubuf, u_ptr, u_ptr + uv_pitch, uv_source_width, source_uv_fraction);
+ g_filter_yuv_rows_proc_(
+ vbuf, v_ptr, v_ptr + uv_pitch, uv_source_width, source_uv_fraction);
+ } else {
+ memcpy(ubuf, u_ptr, uv_source_width);
+ memcpy(vbuf, v_ptr, uv_source_width);
+ }
+ u_ptr = ubuf;
+ v_ptr = vbuf;
+ ubuf[uv_source_width] = ubuf[uv_source_width - 1];
+ vbuf[uv_source_width] = vbuf[uv_source_width - 1];
+ } else {
+ // Offset by 1/2 pixel for center sampling.
+ int source_y = (source_y_subpixel + (kFractionMax / 2)) >> kFractionBits;
+ y_ptr = y_buf + source_y * y_pitch;
+ u_ptr = u_buf + (source_y >> y_shift) * uv_pitch;
+ v_ptr = v_buf + (source_y >> y_shift) * uv_pitch;
+ }
+ if (source_dx == kFractionMax) { // Not scaled
+ g_convert_yuv_to_rgb32_row_proc_(y_ptr, u_ptr, v_ptr, dest_pixel, width);
+ } else {
+ if (filter & FILTER_BILINEAR_H) {
+ g_linear_scale_yuv_to_rgb32_row_proc_(
+ y_ptr, u_ptr, v_ptr, dest_pixel, width, source_dx);
+ } else {
+ g_scale_yuv_to_rgb32_row_proc_(
+ y_ptr, u_ptr, v_ptr, dest_pixel, width, source_dx);
+ }
+ }
+ }
+
+ g_empty_register_state_proc_();
+}
+
+// Scale a frame of YV12 to 32 bit ARGB for a specific rectangle.
+void ScaleYUVToRGB32WithRect(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* rgb_buf,
+ int source_width,
+ int source_height,
+ int dest_width,
+ int dest_height,
+ int dest_rect_left,
+ int dest_rect_top,
+ int dest_rect_right,
+ int dest_rect_bottom,
+ int y_pitch,
+ int uv_pitch,
+ int rgb_pitch) {
+ // This routine doesn't currently support up-scaling.
+ CHECK_LE(dest_width, source_width);
+ CHECK_LE(dest_height, source_height);
+
+ // Sanity-check the destination rectangle.
+ DCHECK(dest_rect_left >= 0 && dest_rect_right <= dest_width);
+ DCHECK(dest_rect_top >= 0 && dest_rect_bottom <= dest_height);
+ DCHECK(dest_rect_right > dest_rect_left);
+ DCHECK(dest_rect_bottom > dest_rect_top);
+
+ // Fixed-point value of vertical and horizontal scale down factor.
+ // Values are in the format 16.16.
+ int y_step = kFractionMax * source_height / dest_height;
+ int x_step = kFractionMax * source_width / dest_width;
+
+ // Determine the coordinates of the rectangle in 16.16 coords.
+ // NB: Our origin is the *center* of the top/left pixel, NOT its top/left.
+ // If we're down-scaling by more than a factor of two, we start with a 50%
+ // fraction to avoid degenerating to point-sampling - we should really just
+ // fix the fraction at 50% for all pixels in that case.
+ int source_left = dest_rect_left * x_step;
+ int source_right = (dest_rect_right - 1) * x_step;
+ if (x_step < kFractionMax * 2) {
+ source_left += ((x_step - kFractionMax) / 2);
+ source_right += ((x_step - kFractionMax) / 2);
+ } else {
+ source_left += kFractionMax / 2;
+ source_right += kFractionMax / 2;
+ }
+ int source_top = dest_rect_top * y_step;
+ if (y_step < kFractionMax * 2) {
+ source_top += ((y_step - kFractionMax) / 2);
+ } else {
+ source_top += kFractionMax / 2;
+ }
+
+ // Determine the parts of the Y, U and V buffers to interpolate.
+ int source_y_left = source_left >> kFractionBits;
+ int source_y_right =
+ std::min((source_right >> kFractionBits) + 2, source_width + 1);
+
+ int source_uv_left = source_y_left / 2;
+ int source_uv_right = std::min((source_right >> (kFractionBits + 1)) + 2,
+ (source_width + 1) / 2);
+
+ int source_y_width = source_y_right - source_y_left;
+ int source_uv_width = source_uv_right - source_uv_left;
+
+ // Determine number of pixels in each output row.
+ int dest_rect_width = dest_rect_right - dest_rect_left;
+
+ // Intermediate buffer for vertical interpolation.
+ // 4096 bytes allows 3 buffers to fit in 12k, which fits in a 16K L1 cache,
+ // and is bigger than most users will generally need.
+ // The buffer is 16-byte aligned and padded with 16 extra bytes; some of the
+ // FilterYUVRowProcs have alignment requirements, and the SSE version can
+ // write up to 16 bytes past the end of the buffer.
+ const int kFilterBufferSize = 4096;
+ const bool kAvoidUsingOptimizedFilter = source_width > kFilterBufferSize;
+ uint8 yuv_temp[16 + kFilterBufferSize * 3 + 16];
+ uint8* y_temp = reinterpret_cast<uint8*>(
+ reinterpret_cast<uintptr_t>(yuv_temp + 15) & ~15);
+ uint8* u_temp = y_temp + kFilterBufferSize;
+ uint8* v_temp = u_temp + kFilterBufferSize;
+
+ // Move to the top-left pixel of output.
+ rgb_buf += dest_rect_top * rgb_pitch;
+ rgb_buf += dest_rect_left * 4;
+
+ // For each destination row perform interpolation and color space
+ // conversion to produce the output.
+ for (int row = dest_rect_top; row < dest_rect_bottom; ++row) {
+ // Round the fixed-point y position to get the current row.
+ int source_row = source_top >> kFractionBits;
+ int source_uv_row = source_row / 2;
+ DCHECK(source_row < source_height);
+
+ // Locate the first row for each plane for interpolation.
+ const uint8* y0_ptr = y_buf + y_pitch * source_row + source_y_left;
+ const uint8* u0_ptr = u_buf + uv_pitch * source_uv_row + source_uv_left;
+ const uint8* v0_ptr = v_buf + uv_pitch * source_uv_row + source_uv_left;
+ const uint8* y1_ptr = NULL;
+ const uint8* u1_ptr = NULL;
+ const uint8* v1_ptr = NULL;
+
+ // Locate the second row for interpolation, being careful not to overrun.
+ if (source_row + 1 >= source_height) {
+ y1_ptr = y0_ptr;
+ } else {
+ y1_ptr = y0_ptr + y_pitch;
+ }
+ if (source_uv_row + 1 >= (source_height + 1) / 2) {
+ u1_ptr = u0_ptr;
+ v1_ptr = v0_ptr;
+ } else {
+ u1_ptr = u0_ptr + uv_pitch;
+ v1_ptr = v0_ptr + uv_pitch;
+ }
+
+ if (!kAvoidUsingOptimizedFilter) {
+ // Vertical scaler uses 16.8 fixed point.
+ int fraction = (source_top & kFractionMask) >> 8;
+ g_filter_yuv_rows_proc_(
+ y_temp + source_y_left, y0_ptr, y1_ptr, source_y_width, fraction);
+ g_filter_yuv_rows_proc_(
+ u_temp + source_uv_left, u0_ptr, u1_ptr, source_uv_width, fraction);
+ g_filter_yuv_rows_proc_(
+ v_temp + source_uv_left, v0_ptr, v1_ptr, source_uv_width, fraction);
+
+ // Perform horizontal interpolation and color space conversion.
+ // TODO(hclam): Use the MMX version after more testing.
+ LinearScaleYUVToRGB32RowWithRange_C(y_temp,
+ u_temp,
+ v_temp,
+ rgb_buf,
+ dest_rect_width,
+ source_left,
+ x_step);
+ } else {
+ // If the frame is too large, we linearly scale a single row.
+ LinearScaleYUVToRGB32RowWithRange_C(y0_ptr,
+ u0_ptr,
+ v0_ptr,
+ rgb_buf,
+ dest_rect_width,
+ source_left,
+ x_step);
+ }
+
+ // Advance vertically in the source and destination image.
+ source_top += y_step;
+ rgb_buf += rgb_pitch;
+ }
+
+ g_empty_register_state_proc_();
+}
+
+void ConvertRGB32ToYUV(const uint8* rgbframe,
+ uint8* yplane,
+ uint8* uplane,
+ uint8* vplane,
+ int width,
+ int height,
+ int rgbstride,
+ int ystride,
+ int uvstride) {
+ g_convert_rgb32_to_yuv_proc_(rgbframe,
+ yplane,
+ uplane,
+ vplane,
+ width,
+ height,
+ rgbstride,
+ ystride,
+ uvstride);
+}
+
+void ConvertRGB24ToYUV(const uint8* rgbframe,
+ uint8* yplane,
+ uint8* uplane,
+ uint8* vplane,
+ int width,
+ int height,
+ int rgbstride,
+ int ystride,
+ int uvstride) {
+ g_convert_rgb24_to_yuv_proc_(rgbframe,
+ yplane,
+ uplane,
+ vplane,
+ width,
+ height,
+ rgbstride,
+ ystride,
+ uvstride);
+}
+
+void ConvertYUY2ToYUV(const uint8* src,
+ uint8* yplane,
+ uint8* uplane,
+ uint8* vplane,
+ int width,
+ int height) {
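+ // YUY2 packs each pixel pair as Y0 U Y1 V; the chroma samples are shared by
+ // the pair and, for the 4:2:0 output produced here, taken from even rows
+ // only.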
+ for (int i = 0; i < height / 2; ++i) {
+ for (int j = 0; j < (width / 2); ++j) {
+ yplane[0] = src[0];
+ *uplane = src[1];
+ yplane[1] = src[2];
+ *vplane = src[3];
+ src += 4;
+ yplane += 2;
+ uplane++;
+ vplane++;
+ }
+ for (int j = 0; j < (width / 2); ++j) {
+ yplane[0] = src[0];
+ yplane[1] = src[2];
+ src += 4;
+ yplane += 2;
+ }
+ }
+}
+
+void ConvertNV21ToYUV(const uint8* src,
+ uint8* yplane,
+ uint8* uplane,
+ uint8* vplane,
+ int width,
+ int height) {
+ int y_plane_size = width * height;
+ memcpy(yplane, src, y_plane_size);
+
+ src += y_plane_size;
+ int u_plane_size = y_plane_size >> 2;
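+ // NV21 stores a single interleaved chroma plane of V/U byte pairs after the
+ // Y plane, so de-interleave it into separate U and V planes.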
+ for (int i = 0; i < u_plane_size; ++i) {
+ *vplane++ = *src++;
+ *uplane++ = *src++;
+ }
+}
+
+void ConvertYUVToRGB32(const uint8* yplane,
+ const uint8* uplane,
+ const uint8* vplane,
+ uint8* rgbframe,
+ int width,
+ int height,
+ int ystride,
+ int uvstride,
+ int rgbstride,
+ YUVType yuv_type) {
+ g_convert_yuv_to_rgb32_proc_(yplane,
+ uplane,
+ vplane,
+ rgbframe,
+ width,
+ height,
+ ystride,
+ uvstride,
+ rgbstride,
+ yuv_type);
+}
+
+void ConvertYUVAToARGB(const uint8* yplane,
+ const uint8* uplane,
+ const uint8* vplane,
+ const uint8* aplane,
+ uint8* rgbframe,
+ int width,
+ int height,
+ int ystride,
+ int uvstride,
+ int astride,
+ int rgbstride,
+ YUVType yuv_type) {
+ g_convert_yuva_to_argb_proc_(yplane,
+ uplane,
+ vplane,
+ aplane,
+ rgbframe,
+ width,
+ height,
+ ystride,
+ uvstride,
+ astride,
+ rgbstride,
+ yuv_type);
+}
+
+} // namespace media
diff --git a/chromium/media/base/yuv_convert.h b/chromium/media/base/yuv_convert.h
new file mode 100644
index 00000000000..8f64c79689a
--- /dev/null
+++ b/chromium/media/base/yuv_convert.h
@@ -0,0 +1,157 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_YUV_CONVERT_H_
+#define MEDIA_BASE_YUV_CONVERT_H_
+
+#include "base/basictypes.h"
+#include "media/base/media_export.h"
+
+// Visual Studio 2010 does not support MMX intrinsics on x64.
+// Some win64 yuv_convert code paths use SSE+MMX yasm, so rather than rewrite
+// them, we use the yasm EmptyRegisterState_MMX in place of _mm_empty() and
+// hide the versions implemented with heavy use of MMX intrinsics.
+// TODO(wolenetz): Use MMX intrinsics when compiling win64 with Visual
+// Studio 2012? http://crbug.com/173450
+#if defined(ARCH_CPU_X86_FAMILY) && \
+ !(defined(ARCH_CPU_X86_64) && defined(COMPILER_MSVC))
+#define MEDIA_MMX_INTRINSICS_AVAILABLE
+#endif
+
+namespace media {
+
+// Type of YUV surface.
+// The value of these enums matter as they are used to shift vertical indices.
+enum YUVType {
+ YV16 = 0, // YV16 has half-width, full-height chroma channels.
+ YV12 = 1, // YV12 has half-width, half-height chroma channels.
+};
+
+// Mirror means flip the image horizontally, as in looking in a mirror.
+// Rotate happens after mirroring.
+enum Rotate {
+ ROTATE_0, // Rotation off.
+ ROTATE_90, // Rotate clockwise.
+ ROTATE_180, // Rotate upside down.
+ ROTATE_270, // Rotate counter clockwise.
+ MIRROR_ROTATE_0, // Mirror horizontally.
+ MIRROR_ROTATE_90, // Mirror then Rotate clockwise.
+ MIRROR_ROTATE_180, // Mirror vertically.
+ MIRROR_ROTATE_270, // Transpose.
+};
+
+// Filter affects how scaling looks.
+enum ScaleFilter {
+ FILTER_NONE = 0, // No filter (point sampled).
+ FILTER_BILINEAR_H = 1, // Bilinear horizontal filter.
+ FILTER_BILINEAR_V = 2, // Bilinear vertical filter.
+ FILTER_BILINEAR = 3, // Bilinear filter.
+};
+
+MEDIA_EXPORT void InitializeCPUSpecificYUVConversions();
+
+// Convert a frame of YUV to 32 bit ARGB.
+// Pass in YV16/YV12 depending on the source format.
+MEDIA_EXPORT void ConvertYUVToRGB32(const uint8* yplane,
+ const uint8* uplane,
+ const uint8* vplane,
+ uint8* rgbframe,
+ int width,
+ int height,
+ int ystride,
+ int uvstride,
+ int rgbstride,
+ YUVType yuv_type);
+
+// Convert a frame of YUVA to 32 bit ARGB.
+// Pass in YV12A.
+MEDIA_EXPORT void ConvertYUVAToARGB(const uint8* yplane,
+ const uint8* uplane,
+ const uint8* vplane,
+ const uint8* aplane,
+ uint8* rgbframe,
+ int width,
+ int height,
+ int ystride,
+ int uvstride,
+ int astride,
+ int rgbstride,
+ YUVType yuv_type);
+
+// Scale a frame of YUV to 32 bit ARGB.
+// Supports rotation and mirroring.
+MEDIA_EXPORT void ScaleYUVToRGB32(const uint8* yplane,
+ const uint8* uplane,
+ const uint8* vplane,
+ uint8* rgbframe,
+ int source_width,
+ int source_height,
+ int width,
+ int height,
+ int ystride,
+ int uvstride,
+ int rgbstride,
+ YUVType yuv_type,
+ Rotate view_rotate,
+ ScaleFilter filter);
+
+// Bilinearly scale a frame of YV12 to 32 bit ARGB for a specified rectangle.
+// |yplane|, etc and |rgbframe| should point to the top-left pixels of the
+// source and destination buffers.
+MEDIA_EXPORT void ScaleYUVToRGB32WithRect(const uint8* yplane,
+ const uint8* uplane,
+ const uint8* vplane,
+ uint8* rgbframe,
+ int source_width,
+ int source_height,
+ int dest_width,
+ int dest_height,
+ int dest_rect_left,
+ int dest_rect_top,
+ int dest_rect_right,
+ int dest_rect_bottom,
+ int ystride,
+ int uvstride,
+ int rgbstride);
+
+MEDIA_EXPORT void ConvertRGB32ToYUV(const uint8* rgbframe,
+ uint8* yplane,
+ uint8* uplane,
+ uint8* vplane,
+ int width,
+ int height,
+ int rgbstride,
+ int ystride,
+ int uvstride);
+
+MEDIA_EXPORT void ConvertRGB24ToYUV(const uint8* rgbframe,
+ uint8* yplane,
+ uint8* uplane,
+ uint8* vplane,
+ int width,
+ int height,
+ int rgbstride,
+ int ystride,
+ int uvstride);
+
+MEDIA_EXPORT void ConvertYUY2ToYUV(const uint8* src,
+ uint8* yplane,
+ uint8* uplane,
+ uint8* vplane,
+ int width,
+ int height);
+
+MEDIA_EXPORT void ConvertNV21ToYUV(const uint8* src,
+ uint8* yplane,
+ uint8* uplane,
+ uint8* vplane,
+ int width,
+ int height);
+
+// Empty SIMD register state after calling optimized scaler functions.
+MEDIA_EXPORT void EmptyRegisterState();
+
+} // namespace media
+
+#endif // MEDIA_BASE_YUV_CONVERT_H_
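A minimal usage sketch with illustrative dimensions; it assumes InitializeCPUSpecificYUVConversions() has already run once at startup, since the conversion entry points dispatch through function pointers chosen there:

    #include "base/memory/scoped_ptr.h"
    #include "media/base/yuv_convert.h"

    const int kWidth = 320;
    const int kHeight = 240;
    // A YV12 buffer holds a full Y plane followed by quarter-size U and V
    // planes, i.e. 12 bits per pixel.
    scoped_ptr<uint8[]> yv12(new uint8[kWidth * kHeight * 12 / 8]);
    scoped_ptr<uint8[]> argb(new uint8[kWidth * kHeight * 4]);
    media::ConvertYUVToRGB32(yv12.get(),                             // Y plane
                             yv12.get() + kWidth * kHeight,          // U plane
                             yv12.get() + kWidth * kHeight * 5 / 4,  // V plane
                             argb.get(),
                             kWidth, kHeight,  // dimensions
                             kWidth,           // Y stride
                             kWidth / 2,       // UV stride
                             kWidth * 4,       // ARGB stride
                             media::YV12);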
diff --git a/chromium/media/base/yuv_convert_unittest.cc b/chromium/media/base/yuv_convert_unittest.cc
new file mode 100644
index 00000000000..21a82f1ea25
--- /dev/null
+++ b/chromium/media/base/yuv_convert_unittest.cc
@@ -0,0 +1,970 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/base_paths.h"
+#include "base/cpu.h"
+#include "base/file_util.h"
+#include "base/logging.h"
+#include "base/path_service.h"
+#include "media/base/djb2.h"
+#include "media/base/simd/convert_rgb_to_yuv.h"
+#include "media/base/simd/convert_yuv_to_rgb.h"
+#include "media/base/simd/filter_yuv.h"
+#include "media/base/yuv_convert.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gfx/rect.h"
+
+// Size of raw image.
+static const int kSourceWidth = 640;
+static const int kSourceHeight = 360;
+static const int kSourceYSize = kSourceWidth * kSourceHeight;
+static const int kSourceUOffset = kSourceYSize;
+static const int kSourceVOffset = kSourceYSize * 5 / 4;
+static const int kScaledWidth = 1024;
+static const int kScaledHeight = 768;
+static const int kDownScaledWidth = 512;
+static const int kDownScaledHeight = 320;
+static const int kBpp = 4;
+
+// Surface sizes for various test files.
+static const int kYUV12Size = kSourceYSize * 12 / 8;
+static const int kYUV16Size = kSourceYSize * 16 / 8;
+static const int kYUY2Size = kSourceYSize * 16 / 8;
+static const int kRGBSize = kSourceYSize * kBpp;
+static const int kRGBSizeScaled = kScaledWidth * kScaledHeight * kBpp;
+static const int kRGB24Size = kSourceYSize * 3;
+static const int kRGBSizeConverted = kSourceYSize * kBpp;
+
+// Helper for reading test data into a scoped_ptr<uint8[]>.
+static void ReadData(const base::FilePath::CharType* filename,
+ int expected_size,
+ scoped_ptr<uint8[]>* data) {
+ data->reset(new uint8[expected_size]);
+
+ base::FilePath path;
+ CHECK(PathService::Get(base::DIR_SOURCE_ROOT, &path));
+ path = path.Append(FILE_PATH_LITERAL("media"))
+ .Append(FILE_PATH_LITERAL("test"))
+ .Append(FILE_PATH_LITERAL("data"))
+ .Append(filename);
+
+ // Verify file size is correct.
+ int64 actual_size = 0;
+ file_util::GetFileSize(path, &actual_size);
+ CHECK_EQ(actual_size, expected_size);
+
+ // Verify bytes read are correct.
+ int bytes_read = file_util::ReadFile(
+ path, reinterpret_cast<char*>(data->get()), expected_size);
+ CHECK_EQ(bytes_read, expected_size);
+}
+
+static void ReadYV12Data(scoped_ptr<uint8[]>* data) {
+ ReadData(FILE_PATH_LITERAL("bali_640x360_P420.yuv"), kYUV12Size, data);
+}
+
+static void ReadYV16Data(scoped_ptr<uint8[]>* data) {
+ ReadData(FILE_PATH_LITERAL("bali_640x360_P422.yuv"), kYUV16Size, data);
+}
+
+static void ReadRGB24Data(scoped_ptr<uint8[]>* data) {
+ ReadData(FILE_PATH_LITERAL("bali_640x360_RGB24.rgb"), kRGB24Size, data);
+}
+
+static void ReadYUY2Data(scoped_ptr<uint8[]>* data) {
+ ReadData(FILE_PATH_LITERAL("bali_640x360_YUY2.yuv"), kYUY2Size, data);
+}
+
+#if defined(OS_ANDROID)
+// Helper for swapping red and blue channels of RGBA or BGRA.
+static void SwapRedAndBlueChannels(unsigned char* pixels, size_t buffer_size) {
+ for (size_t i = 0; i < buffer_size; i += 4) {
+ std::swap(pixels[i], pixels[i + 2]);
+ }
+}
+#endif
+
+namespace media {
+
+TEST(YUVConvertTest, YV12) {
+ // Allocate all surfaces.
+ scoped_ptr<uint8[]> yuv_bytes;
+ scoped_ptr<uint8[]> rgb_bytes(new uint8[kRGBSize]);
+ scoped_ptr<uint8[]> rgb_converted_bytes(new uint8[kRGBSizeConverted]);
+
+ // Read YUV reference data from file.
+ ReadYV12Data(&yuv_bytes);
+
+ // Convert a frame of YUV to 32 bit ARGB.
+ media::ConvertYUVToRGB32(yuv_bytes.get(),
+ yuv_bytes.get() + kSourceUOffset,
+ yuv_bytes.get() + kSourceVOffset,
+ rgb_converted_bytes.get(), // RGB output
+ kSourceWidth, kSourceHeight, // Dimensions
+ kSourceWidth, // YStride
+ kSourceWidth / 2, // UVStride
+ kSourceWidth * kBpp, // RGBStride
+ media::YV12);
+
+#if defined(OS_ANDROID)
+ SwapRedAndBlueChannels(rgb_converted_bytes.get(), kRGBSizeConverted);
+#endif
+
+ uint32 rgb_hash = DJB2Hash(rgb_converted_bytes.get(), kRGBSizeConverted,
+ kDJB2HashSeed);
+ EXPECT_EQ(2413171226u, rgb_hash);
+}
+
+TEST(YUVConvertTest, YV16) {
+ // Allocate all surfaces.
+ scoped_ptr<uint8[]> yuv_bytes;
+ scoped_ptr<uint8[]> rgb_bytes(new uint8[kRGBSize]);
+ scoped_ptr<uint8[]> rgb_converted_bytes(new uint8[kRGBSizeConverted]);
+
+ // Read YUV reference data from file.
+ ReadYV16Data(&yuv_bytes);
+
+ // Convert a frame of YUV to 32 bit ARGB.
+ media::ConvertYUVToRGB32(yuv_bytes.get(), // Y
+ yuv_bytes.get() + kSourceUOffset, // U
+ yuv_bytes.get() + kSourceYSize * 3 / 2, // V
+ rgb_converted_bytes.get(), // RGB output
+ kSourceWidth, kSourceHeight, // Dimensions
+ kSourceWidth, // YStride
+ kSourceWidth / 2, // UVStride
+ kSourceWidth * kBpp, // RGBStride
+ media::YV16);
+
+#if defined(OS_ANDROID)
+ SwapRedAndBlueChannels(rgb_converted_bytes.get(), kRGBSizeConverted);
+#endif
+
+ uint32 rgb_hash = DJB2Hash(rgb_converted_bytes.get(), kRGBSizeConverted,
+ kDJB2HashSeed);
+ EXPECT_EQ(4222342047u, rgb_hash);
+}
+
+struct YUVScaleTestData {
+ YUVScaleTestData(media::YUVType y, media::ScaleFilter s, uint32 r)
+ : yuv_type(y),
+ scale_filter(s),
+ rgb_hash(r) {
+ }
+
+ media::YUVType yuv_type;
+ media::ScaleFilter scale_filter;
+ uint32 rgb_hash;
+};
+
+class YUVScaleTest : public ::testing::TestWithParam<YUVScaleTestData> {
+ public:
+ YUVScaleTest() {
+ switch (GetParam().yuv_type) {
+ case media::YV12:
+ ReadYV12Data(&yuv_bytes_);
+ break;
+ case media::YV16:
+ ReadYV16Data(&yuv_bytes_);
+ break;
+ }
+
+ rgb_bytes_.reset(new uint8[kRGBSizeScaled]);
+ }
+
+ // Helpers for getting the proper Y, U and V plane offsets.
+ uint8* y_plane() { return yuv_bytes_.get(); }
+ uint8* u_plane() { return yuv_bytes_.get() + kSourceYSize; }
+ uint8* v_plane() {
+ switch (GetParam().yuv_type) {
+ case media::YV12:
+ return yuv_bytes_.get() + kSourceVOffset;
+ case media::YV16:
+ return yuv_bytes_.get() + kSourceYSize * 3 / 2;
+ }
+ return NULL;
+ }
+
+ scoped_ptr<uint8[]> yuv_bytes_;
+ scoped_ptr<uint8[]> rgb_bytes_;
+};
+
+TEST_P(YUVScaleTest, NoScale) {
+ media::ScaleYUVToRGB32(y_plane(), // Y
+ u_plane(), // U
+ v_plane(), // V
+ rgb_bytes_.get(), // RGB output
+ kSourceWidth, kSourceHeight, // Dimensions
+ kSourceWidth, kSourceHeight, // Dimensions
+ kSourceWidth, // YStride
+ kSourceWidth / 2, // UvStride
+ kSourceWidth * kBpp, // RgbStride
+ GetParam().yuv_type,
+ media::ROTATE_0,
+ GetParam().scale_filter);
+
+ uint32 yuv_hash = DJB2Hash(rgb_bytes_.get(), kRGBSize, kDJB2HashSeed);
+
+ media::ConvertYUVToRGB32(y_plane(), // Y
+ u_plane(), // U
+ v_plane(), // V
+ rgb_bytes_.get(), // RGB output
+ kSourceWidth, kSourceHeight, // Dimensions
+ kSourceWidth, // YStride
+ kSourceWidth / 2, // UVStride
+ kSourceWidth * kBpp, // RGBStride
+ GetParam().yuv_type);
+
+ uint32 rgb_hash = DJB2Hash(rgb_bytes_.get(), kRGBSize, kDJB2HashSeed);
+
+ EXPECT_EQ(yuv_hash, rgb_hash);
+}
+
+TEST_P(YUVScaleTest, Normal) {
+ media::ScaleYUVToRGB32(y_plane(), // Y
+ u_plane(), // U
+ v_plane(), // V
+ rgb_bytes_.get(), // RGB output
+ kSourceWidth, kSourceHeight, // Dimensions
+ kScaledWidth, kScaledHeight, // Dimensions
+ kSourceWidth, // YStride
+ kSourceWidth / 2, // UvStride
+ kScaledWidth * kBpp, // RgbStride
+ GetParam().yuv_type,
+ media::ROTATE_0,
+ GetParam().scale_filter);
+
+#if defined(OS_ANDROID)
+ SwapRedAndBlueChannels(rgb_bytes_.get(), kRGBSizeScaled);
+#endif
+
+ uint32 rgb_hash = DJB2Hash(rgb_bytes_.get(), kRGBSizeScaled, kDJB2HashSeed);
+ EXPECT_EQ(GetParam().rgb_hash, rgb_hash);
+}
+
+TEST_P(YUVScaleTest, ZeroSourceSize) {
+ media::ScaleYUVToRGB32(y_plane(), // Y
+ u_plane(), // U
+ v_plane(), // V
+ rgb_bytes_.get(), // RGB output
+ 0, 0, // Dimensions
+ kScaledWidth, kScaledHeight, // Dimensions
+ kSourceWidth, // YStride
+ kSourceWidth / 2, // UVStride
+ kScaledWidth * kBpp, // RGBStride
+ GetParam().yuv_type,
+ media::ROTATE_0,
+ GetParam().scale_filter);
+
+ // Testing for out-of-bounds reads/writes with AddressSanitizer.
+}
+
+TEST_P(YUVScaleTest, ZeroDestinationSize) {
+ media::ScaleYUVToRGB32(y_plane(), // Y
+ u_plane(), // U
+ v_plane(), // V
+ rgb_bytes_.get(), // RGB output
+ kSourceWidth, kSourceHeight, // Dimensions
+ 0, 0, // Dimensions
+ kSourceWidth, // YStride
+ kSourceWidth / 2, // UVStride
+ kScaledWidth * kBpp, // RGBStride
+ GetParam().yuv_type,
+ media::ROTATE_0,
+ GetParam().scale_filter);
+
+ // Testing for out-of-bounds reads/writes with AddressSanitizer.
+}
+
+TEST_P(YUVScaleTest, OddWidthAndHeightNotCrash) {
+ media::ScaleYUVToRGB32(y_plane(), // Y
+ u_plane(), // U
+ v_plane(), // V
+ rgb_bytes_.get(), // RGB output
+ kSourceWidth, kSourceHeight, // Dimensions
+ 3, 3, // Dimensions
+ kSourceWidth, // YStride
+ kSourceWidth / 2, // UVStride
+ kScaledWidth * kBpp, // RGBStride
+ GetParam().yuv_type,
+ media::ROTATE_0,
+ GetParam().scale_filter);
+}
+
+INSTANTIATE_TEST_CASE_P(
+ YUVScaleFormats, YUVScaleTest,
+ ::testing::Values(
+ YUVScaleTestData(media::YV12, media::FILTER_NONE, 4136904952u),
+ YUVScaleTestData(media::YV16, media::FILTER_NONE, 1501777547u),
+ YUVScaleTestData(media::YV12, media::FILTER_BILINEAR, 3164274689u),
+ YUVScaleTestData(media::YV16, media::FILTER_BILINEAR, 3095878046u)));
+
+// This tests a known worst-case YUV value and checks for overflow.
+TEST(YUVConvertTest, Clamp) {
+ // Values that previously failed, taken from a bug report.
+ unsigned char y = 255u;
+ unsigned char u = 255u;
+ unsigned char v = 19u;
+
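+ // With BT.601-style coefficients (assumed from the converter's fixed-point
+ // tables), R = 1.164 * (255 - 16) + 1.596 * (19 - 128), which is about 104,
+ // while the G and B terms overflow and must clamp to 255.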
+ // Prefill an extra-large destination buffer to test for overflow.
+ unsigned char rgb[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
+ unsigned char expected[8] = { 255, 255, 104, 255, 4, 5, 6, 7 };
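+ // The pixel is stored as B, G, R, A in memory (assuming the usual
+ // little-endian 32-bit ARGB layout), and bytes 4-7 must keep their prefill
+ // values to prove that the converter wrote only a single pixel.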
+ // Convert a frame of YUV to 32-bit ARGB.
+ media::ConvertYUVToRGB32(&y, // Y
+ &u, // U
+ &v, // V
+ &rgb[0], // RGB output
+ 1, 1, // Dimensions
+ 0, // YStride
+ 0, // UVStride
+ 0, // RGBStride
+ media::YV12);
+
+#if defined(OS_ANDROID)
+ SwapRedAndBlueChannels(rgb, kBpp);
+#endif
+
+ int expected_test = memcmp(rgb, expected, sizeof(expected));
+ EXPECT_EQ(0, expected_test);
+}
+
+TEST(YUVConvertTest, RGB24ToYUV) {
+ // Allocate all surfaces.
+ scoped_ptr<uint8[]> rgb_bytes;
+ scoped_ptr<uint8[]> yuv_converted_bytes(new uint8[kYUV12Size]);
+
+ // Read RGB24 reference data from file.
+ ReadRGB24Data(&rgb_bytes);
+
+ // Convert to I420.
+ media::ConvertRGB24ToYUV(rgb_bytes.get(),
+ yuv_converted_bytes.get(),
+ yuv_converted_bytes.get() + kSourceUOffset,
+ yuv_converted_bytes.get() + kSourceVOffset,
+ kSourceWidth, kSourceHeight, // Dimensions
+ kSourceWidth * 3, // RGBStride
+ kSourceWidth, // YStride
+ kSourceWidth / 2); // UVStride
+
+ uint32 rgb_hash = DJB2Hash(yuv_converted_bytes.get(), kYUV12Size,
+ kDJB2HashSeed);
+ EXPECT_EQ(320824432u, rgb_hash);
+}
+
+TEST(YUVConvertTest, RGB32ToYUV) {
+ // Allocate all surfaces.
+ scoped_ptr<uint8[]> yuv_bytes(new uint8[kYUV12Size]);
+ scoped_ptr<uint8[]> rgb_bytes(new uint8[kRGBSize]);
+ scoped_ptr<uint8[]> yuv_converted_bytes(new uint8[kYUV12Size]);
+ scoped_ptr<uint8[]> rgb_converted_bytes(new uint8[kRGBSize]);
+
+ // Read YUV reference data from file.
+ base::FilePath yuv_url;
+ EXPECT_TRUE(PathService::Get(base::DIR_SOURCE_ROOT, &yuv_url));
+ yuv_url = yuv_url.Append(FILE_PATH_LITERAL("media"))
+ .Append(FILE_PATH_LITERAL("test"))
+ .Append(FILE_PATH_LITERAL("data"))
+ .Append(FILE_PATH_LITERAL("bali_640x360_P420.yuv"));
+ EXPECT_EQ(static_cast<int>(kYUV12Size),
+ file_util::ReadFile(yuv_url,
+ reinterpret_cast<char*>(yuv_bytes.get()),
+ static_cast<int>(kYUV12Size)));
+
+ // Convert a frame of YUV to 32-bit ARGB.
+ media::ConvertYUVToRGB32(yuv_bytes.get(),
+ yuv_bytes.get() + kSourceUOffset,
+ yuv_bytes.get() + kSourceVOffset,
+ rgb_bytes.get(), // RGB output
+ kSourceWidth, kSourceHeight, // Dimensions
+ kSourceWidth, // YStride
+ kSourceWidth / 2, // UVStride
+ kSourceWidth * kBpp, // RGBStride
+ media::YV12);
+
+ // Convert RGB32 to YV12.
+ media::ConvertRGB32ToYUV(rgb_bytes.get(),
+ yuv_converted_bytes.get(),
+ yuv_converted_bytes.get() + kSourceUOffset,
+ yuv_converted_bytes.get() + kSourceVOffset,
+ kSourceWidth, kSourceHeight, // Dimensions
+ kSourceWidth * 4, // RGBStride
+ kSourceWidth, // YStride
+ kSourceWidth / 2); // UVStride
+
+ // Convert YV12 back to RGB32.
+ media::ConvertYUVToRGB32(yuv_converted_bytes.get(),
+ yuv_converted_bytes.get() + kSourceUOffset,
+ yuv_converted_bytes.get() + kSourceVOffset,
+ rgb_converted_bytes.get(), // RGB output
+ kSourceWidth, kSourceHeight, // Dimensions
+ kSourceWidth, // YStride
+ kSourceWidth / 2, // UVStride
+ kSourceWidth * kBpp, // RGBStride
+ media::YV12);
+
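+ // The RGB -> YUV -> RGB round trip is lossy (chroma subsampling plus
+ // fixed-point rounding), so compare against a mean-error bound rather
+ // than expecting byte equality.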
+ int error = 0;
+ for (int i = 0; i < kRGBSize; ++i) {
+ int diff = rgb_converted_bytes[i] - rgb_bytes[i];
+ if (diff < 0)
+ diff = -diff;
+ error += diff;
+ }
+
+ // Make sure the error is within bounds.
+ DVLOG(1) << "Average error per channel: " << error / kRGBSize;
+ EXPECT_GT(5, error / kRGBSize);
+}
+
+TEST(YUVConvertTest, YUY2ToYUV) {
+ // Allocate all surfaces.
+ scoped_ptr<uint8[]> yuy_bytes;
+ scoped_ptr<uint8[]> yuv_converted_bytes(new uint8[kYUV12Size]);
+
+ // Read YUY reference data from file.
+ ReadYUY2Data(&yuy_bytes);
+
+ // Convert to I420.
+ media::ConvertYUY2ToYUV(yuy_bytes.get(),
+ yuv_converted_bytes.get(),
+ yuv_converted_bytes.get() + kSourceUOffset,
+ yuv_converted_bytes.get() + kSourceVOffset,
+ kSourceWidth, kSourceHeight);
+
+ uint32 yuy_hash = DJB2Hash(yuv_converted_bytes.get(), kYUV12Size,
+ kDJB2HashSeed);
+ EXPECT_EQ(666823187u, yuy_hash);
+}
+
+TEST(YUVConvertTest, DownScaleYUVToRGB32WithRect) {
+ // Read YUV reference data from file.
+ base::FilePath yuv_url;
+ EXPECT_TRUE(PathService::Get(base::DIR_SOURCE_ROOT, &yuv_url));
+ yuv_url = yuv_url.Append(FILE_PATH_LITERAL("media"))
+ .Append(FILE_PATH_LITERAL("test"))
+ .Append(FILE_PATH_LITERAL("data"))
+ .Append(FILE_PATH_LITERAL("bali_640x360_P420.yuv"));
+ const size_t size_of_yuv = kSourceYSize * 12 / 8; // 12 bpp.
+ scoped_ptr<uint8[]> yuv_bytes(new uint8[size_of_yuv]);
+ EXPECT_EQ(static_cast<int>(size_of_yuv),
+ file_util::ReadFile(yuv_url,
+ reinterpret_cast<char*>(yuv_bytes.get()),
+ static_cast<int>(size_of_yuv)));
+
+ // Scale the full frame of YUV to 32-bit ARGB.
+ // The API currently only supports down-scaling, so we don't test up-scaling.
+ const size_t size_of_rgb_scaled = kDownScaledWidth * kDownScaledHeight * kBpp;
+ scoped_ptr<uint8[]> rgb_scaled_bytes(new uint8[size_of_rgb_scaled]);
+ gfx::Rect sub_rect(0, 0, kDownScaledWidth, kDownScaledHeight);
+
+ // We can't compare with the full-frame scaler because it uses slightly
+ // different sampling coordinates.
+ media::ScaleYUVToRGB32WithRect(
+ yuv_bytes.get(), // Y
+ yuv_bytes.get() + kSourceUOffset, // U
+ yuv_bytes.get() + kSourceVOffset, // V
+ rgb_scaled_bytes.get(), // RGB output
+ kSourceWidth, kSourceHeight, // Dimensions
+ kDownScaledWidth, kDownScaledHeight, // Dimensions
+ sub_rect.x(), sub_rect.y(), // Dest rect
+ sub_rect.right(), sub_rect.bottom(), // Dest rect
+ kSourceWidth, // YStride
+ kSourceWidth / 2, // UVStride
+ kDownScaledWidth * kBpp); // RGBStride
+
+ uint32 rgb_hash_full_rect = DJB2Hash(rgb_scaled_bytes.get(),
+ size_of_rgb_scaled,
+ kDJB2HashSeed);
+
+ // Re-scale sub-rectangles and verify the results are the same.
+ int next_sub_rect = 0;
+ while (!sub_rect.IsEmpty()) {
+ // Scale a partial rectangle.
+ media::ScaleYUVToRGB32WithRect(
+ yuv_bytes.get(), // Y
+ yuv_bytes.get() + kSourceUOffset, // U
+ yuv_bytes.get() + kSourceVOffset, // V
+ rgb_scaled_bytes.get(), // RGB output
+ kSourceWidth, kSourceHeight, // Dimensions
+ kDownScaledWidth, kDownScaledHeight, // Dimensions
+ sub_rect.x(), sub_rect.y(), // Dest rect
+ sub_rect.right(), sub_rect.bottom(), // Dest rect
+ kSourceWidth, // YStride
+ kSourceWidth / 2, // UVStride
+ kDownScaledWidth * kBpp); // RGBStride
+ uint32 rgb_hash_sub_rect = DJB2Hash(rgb_scaled_bytes.get(),
+ size_of_rgb_scaled,
+ kDJB2HashSeed);
+
+ EXPECT_EQ(rgb_hash_full_rect, rgb_hash_sub_rect);
+
+ // Now choose a quarter rect of this sub-rect: bits 0 and 1 of
+ // next_sub_rect select the horizontal and vertical half to keep, so
+ // successive iterations exercise different quadrants.
+ if (next_sub_rect & 1)
+ sub_rect.set_x(sub_rect.x() + sub_rect.width() / 2);
+ if (next_sub_rect & 2)
+ sub_rect.set_y(sub_rect.y() + sub_rect.height() / 2);
+ sub_rect.set_width(sub_rect.width() / 2);
+ sub_rect.set_height(sub_rect.height() / 2);
+ next_sub_rect++;
+ }
+}
+
+#if !defined(ARCH_CPU_ARM_FAMILY) && !defined(ARCH_CPU_MIPS_FAMILY)
+TEST(YUVConvertTest, RGB32ToYUV_SSE2_MatchReference) {
+ base::CPU cpu;
+ if (!cpu.has_sse2()) {
+ LOG(WARNING) << "System doesn't support SSE2, test not executed.";
+ return;
+ }
+
+ // Allocate all surfaces.
+ scoped_ptr<uint8[]> yuv_bytes(new uint8[kYUV12Size]);
+ scoped_ptr<uint8[]> rgb_bytes(new uint8[kRGBSize]);
+ scoped_ptr<uint8[]> yuv_converted_bytes(new uint8[kYUV12Size]);
+ scoped_ptr<uint8[]> yuv_reference_bytes(new uint8[kYUV12Size]);
+
+ ReadYV12Data(&yuv_bytes);
+
+ // Convert a frame of YUV to 32-bit ARGB.
+ media::ConvertYUVToRGB32(
+ yuv_bytes.get(),
+ yuv_bytes.get() + kSourceUOffset,
+ yuv_bytes.get() + kSourceVOffset,
+ rgb_bytes.get(), // RGB output
+ kSourceWidth, kSourceHeight, // Dimensions
+ kSourceWidth, // YStride
+ kSourceWidth / 2, // UVStride
+ kSourceWidth * kBpp, // RGBStride
+ media::YV12);
+
+ // Convert RGB32 to YV12 with SSE2 version.
+ media::ConvertRGB32ToYUV_SSE2(
+ rgb_bytes.get(),
+ yuv_converted_bytes.get(),
+ yuv_converted_bytes.get() + kSourceUOffset,
+ yuv_converted_bytes.get() + kSourceVOffset,
+ kSourceWidth, kSourceHeight, // Dimensions
+ kSourceWidth * 4, // RGBStride
+ kSourceWidth, // YStride
+ kSourceWidth / 2); // UVStride
+
+ // Convert RGB32 to YV12 with reference version.
+ media::ConvertRGB32ToYUV_SSE2_Reference(
+ rgb_bytes.get(),
+ yuv_reference_bytes.get(),
+ yuv_reference_bytes.get() + kSourceUOffset,
+ yuv_reference_bytes.get() + kSourceVOffset,
+ kSourceWidth, kSourceHeight, // Dimensions
+ kSourceWidth * 4, // RGBStride
+ kSourceWidth, // YStride
+ kSourceWidth / 2); // UVStride
+
+ // Now convert an odd width and height. This overwrites part of the buffer
+ // generated above, but that is fine because the point of this test is to
+ // match the result against the reference code.
+
+ // Convert RGB32 to YV12 with SSE2 version.
+ media::ConvertRGB32ToYUV_SSE2(
+ rgb_bytes.get(),
+ yuv_converted_bytes.get(),
+ yuv_converted_bytes.get() + kSourceUOffset,
+ yuv_converted_bytes.get() + kSourceVOffset,
+ 7, 7, // Dimensions
+ kSourceWidth * 4, // RGBStride
+ kSourceWidth, // YStride
+ kSourceWidth / 2); // UVStride
+
+ // Convert RGB32 to YV12 with reference version.
+ media::ConvertRGB32ToYUV_SSE2_Reference(
+ rgb_bytes.get(),
+ yuv_reference_bytes.get(),
+ yuv_reference_bytes.get() + kSourceUOffset,
+ yuv_reference_bytes.get() + kSourceVOffset,
+ 7, 7, // Dimensions
+ kSourceWidth * 4, // RGBStride
+ kSourceWidth, // YStride
+ kSourceWidth / 2); // UVStride
+
+ int error = 0;
+ for (int i = 0; i < kYUV12Size; ++i) {
+ int diff = yuv_reference_bytes[i] - yuv_converted_bytes[i];
+ if (diff < 0)
+ diff = -diff;
+ error += diff;
+ }
+
+ // Make sure there's no difference from the reference.
+ EXPECT_EQ(0, error);
+}
+
+TEST(YUVConvertTest, ConvertYUVToRGB32Row_MMX) {
+ base::CPU cpu;
+ if (!cpu.has_mmx()) {
+ LOG(WARNING) << "System not supported. Test skipped.";
+ return;
+ }
+
+ scoped_ptr<uint8[]> yuv_bytes(new uint8[kYUV12Size]);
+ scoped_ptr<uint8[]> rgb_bytes_reference(new uint8[kRGBSize]);
+ scoped_ptr<uint8[]> rgb_bytes_converted(new uint8[kRGBSize]);
+ ReadYV12Data(&yuv_bytes);
+
+ const int kWidth = 167;
+ ConvertYUVToRGB32Row_C(yuv_bytes.get(),
+ yuv_bytes.get() + kSourceUOffset,
+ yuv_bytes.get() + kSourceVOffset,
+ rgb_bytes_reference.get(),
+ kWidth);
+ ConvertYUVToRGB32Row_MMX(yuv_bytes.get(),
+ yuv_bytes.get() + kSourceUOffset,
+ yuv_bytes.get() + kSourceVOffset,
+ rgb_bytes_converted.get(),
+ kWidth);
+ media::EmptyRegisterState();
+ EXPECT_EQ(0, memcmp(rgb_bytes_reference.get(),
+ rgb_bytes_converted.get(),
+ kWidth * kBpp));
+}
+
+TEST(YUVConvertTest, ConvertYUVToRGB32Row_SSE) {
+ base::CPU cpu;
+ if (!cpu.has_sse()) {
+ LOG(WARNING) << "System not supported. Test skipped.";
+ return;
+ }
+
+ scoped_ptr<uint8[]> yuv_bytes(new uint8[kYUV12Size]);
+ scoped_ptr<uint8[]> rgb_bytes_reference(new uint8[kRGBSize]);
+ scoped_ptr<uint8[]> rgb_bytes_converted(new uint8[kRGBSize]);
+ ReadYV12Data(&yuv_bytes);
+
+ const int kWidth = 167;
+ ConvertYUVToRGB32Row_C(yuv_bytes.get(),
+ yuv_bytes.get() + kSourceUOffset,
+ yuv_bytes.get() + kSourceVOffset,
+ rgb_bytes_reference.get(),
+ kWidth);
+ ConvertYUVToRGB32Row_SSE(yuv_bytes.get(),
+ yuv_bytes.get() + kSourceUOffset,
+ yuv_bytes.get() + kSourceVOffset,
+ rgb_bytes_converted.get(),
+ kWidth);
+ media::EmptyRegisterState();
+ EXPECT_EQ(0, memcmp(rgb_bytes_reference.get(),
+ rgb_bytes_converted.get(),
+ kWidth * kBpp));
+}
+
+TEST(YUVConvertTest, ScaleYUVToRGB32Row_MMX) {
+ base::CPU cpu;
+ if (!cpu.has_mmx()) {
+ LOG(WARNING) << "System not supported. Test skipped.";
+ return;
+ }
+
+ scoped_ptr<uint8[]> yuv_bytes(new uint8[kYUV12Size]);
+ scoped_ptr<uint8[]> rgb_bytes_reference(new uint8[kRGBSize]);
+ scoped_ptr<uint8[]> rgb_bytes_converted(new uint8[kRGBSize]);
+ ReadYV12Data(&yuv_bytes);
+
+ const int kWidth = 167;
+ const int kSourceDx = 80000; // 16.16 fixed point; > 65536 (1.0) means a scale-down.
+ ScaleYUVToRGB32Row_C(yuv_bytes.get(),
+ yuv_bytes.get() + kSourceUOffset,
+ yuv_bytes.get() + kSourceVOffset,
+ rgb_bytes_reference.get(),
+ kWidth,
+ kSourceDx);
+ ScaleYUVToRGB32Row_MMX(yuv_bytes.get(),
+ yuv_bytes.get() + kSourceUOffset,
+ yuv_bytes.get() + kSourceVOffset,
+ rgb_bytes_converted.get(),
+ kWidth,
+ kSourceDx);
+ media::EmptyRegisterState();
+ EXPECT_EQ(0, memcmp(rgb_bytes_reference.get(),
+ rgb_bytes_converted.get(),
+ kWidth * kBpp));
+}
+
+TEST(YUVConvertTest, ScaleYUVToRGB32Row_SSE) {
+ base::CPU cpu;
+ if (!cpu.has_sse()) {
+ LOG(WARNING) << "System not supported. Test skipped.";
+ return;
+ }
+
+ scoped_ptr<uint8[]> yuv_bytes(new uint8[kYUV12Size]);
+ scoped_ptr<uint8[]> rgb_bytes_reference(new uint8[kRGBSize]);
+ scoped_ptr<uint8[]> rgb_bytes_converted(new uint8[kRGBSize]);
+ ReadYV12Data(&yuv_bytes);
+
+ const int kWidth = 167;
+ const int kSourceDx = 80000; // 16.16 fixed point; > 65536 (1.0) means a scale-down.
+ ScaleYUVToRGB32Row_C(yuv_bytes.get(),
+ yuv_bytes.get() + kSourceUOffset,
+ yuv_bytes.get() + kSourceVOffset,
+ rgb_bytes_reference.get(),
+ kWidth,
+ kSourceDx);
+ ScaleYUVToRGB32Row_SSE(yuv_bytes.get(),
+ yuv_bytes.get() + kSourceUOffset,
+ yuv_bytes.get() + kSourceVOffset,
+ rgb_bytes_converted.get(),
+ kWidth,
+ kSourceDx);
+ media::EmptyRegisterState();
+ EXPECT_EQ(0, memcmp(rgb_bytes_reference.get(),
+ rgb_bytes_converted.get(),
+ kWidth * kBpp));
+}
+
+TEST(YUVConvertTest, LinearScaleYUVToRGB32Row_MMX) {
+ base::CPU cpu;
+ if (!cpu.has_mmx()) {
+ LOG(WARNING) << "System not supported. Test skipped.";
+ return;
+ }
+
+ scoped_ptr<uint8[]> yuv_bytes(new uint8[kYUV12Size]);
+ scoped_ptr<uint8[]> rgb_bytes_reference(new uint8[kRGBSize]);
+ scoped_ptr<uint8[]> rgb_bytes_converted(new uint8[kRGBSize]);
+ ReadYV12Data(&yuv_bytes);
+
+ const int kWidth = 167;
+ const int kSourceDx = 80000; // 16.16 fixed point; > 65536 (1.0) means a scale-down.
+ LinearScaleYUVToRGB32Row_C(yuv_bytes.get(),
+ yuv_bytes.get() + kSourceUOffset,
+ yuv_bytes.get() + kSourceVOffset,
+ rgb_bytes_reference.get(),
+ kWidth,
+ kSourceDx);
+ LinearScaleYUVToRGB32Row_MMX(yuv_bytes.get(),
+ yuv_bytes.get() + kSourceUOffset,
+ yuv_bytes.get() + kSourceVOffset,
+ rgb_bytes_converted.get(),
+ kWidth,
+ kSourceDx);
+ media::EmptyRegisterState();
+ EXPECT_EQ(0, memcmp(rgb_bytes_reference.get(),
+ rgb_bytes_converted.get(),
+ kWidth * kBpp));
+}
+
+TEST(YUVConvertTest, LinearScaleYUVToRGB32Row_SSE) {
+ base::CPU cpu;
+ if (!cpu.has_sse()) {
+ LOG(WARNING) << "System not supported. Test skipped.";
+ return;
+ }
+
+ scoped_ptr<uint8[]> yuv_bytes(new uint8[kYUV12Size]);
+ scoped_ptr<uint8[]> rgb_bytes_reference(new uint8[kRGBSize]);
+ scoped_ptr<uint8[]> rgb_bytes_converted(new uint8[kRGBSize]);
+ ReadYV12Data(&yuv_bytes);
+
+ const int kWidth = 167;
+ const int kSourceDx = 80000; // 16.16 fixed point; > 65536 (1.0) means a scale-down.
+ LinearScaleYUVToRGB32Row_C(yuv_bytes.get(),
+ yuv_bytes.get() + kSourceUOffset,
+ yuv_bytes.get() + kSourceVOffset,
+ rgb_bytes_reference.get(),
+ kWidth,
+ kSourceDx);
+ LinearScaleYUVToRGB32Row_SSE(yuv_bytes.get(),
+ yuv_bytes.get() + kSourceUOffset,
+ yuv_bytes.get() + kSourceVOffset,
+ rgb_bytes_converted.get(),
+ kWidth,
+ kSourceDx);
+ media::EmptyRegisterState();
+ EXPECT_EQ(0, memcmp(rgb_bytes_reference.get(),
+ rgb_bytes_converted.get(),
+ kWidth * kBpp));
+}
+
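+// Filtering a single pixel (width 1) must write only dst[0]; the loop below
+// verifies that every other byte kept its zero prefill, i.e. that no
+// out-of-bounds write occurred.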
+TEST(YUVConvertTest, FilterYUVRows_C_OutOfBounds) {
+ scoped_ptr<uint8[]> src(new uint8[16]);
+ scoped_ptr<uint8[]> dst(new uint8[16]);
+
+ memset(src.get(), 0xff, 16);
+ memset(dst.get(), 0, 16);
+
+ media::FilterYUVRows_C(dst.get(), src.get(), src.get(), 1, 255);
+
+ EXPECT_EQ(255u, dst[0]);
+ for (int i = 1; i < 16; ++i) {
+ EXPECT_EQ(0u, dst[i]) << " not equal at " << i;
+ }
+}
+
+#if defined(MEDIA_MMX_INTRINSICS_AVAILABLE)
+TEST(YUVConvertTest, FilterYUVRows_MMX_OutOfBounds) {
+ base::CPU cpu;
+ if (!cpu.has_mmx()) {
+ LOG(WARNING) << "System not supported. Test skipped.";
+ return;
+ }
+
+ scoped_ptr<uint8[]> src(new uint8[16]);
+ scoped_ptr<uint8[]> dst(new uint8[16]);
+
+ memset(src.get(), 0xff, 16);
+ memset(dst.get(), 0, 16);
+
+ media::FilterYUVRows_MMX(dst.get(), src.get(), src.get(), 1, 255);
+ media::EmptyRegisterState();
+
+ EXPECT_EQ(255u, dst[0]);
+ for (int i = 1; i < 16; ++i) {
+ EXPECT_EQ(0u, dst[i]);
+ }
+}
+#endif // defined(MEDIA_MMX_INTRINSICS_AVAILABLE)
+
+TEST(YUVConvertTest, FilterYUVRows_SSE2_OutOfBounds) {
+ base::CPU cpu;
+ if (!cpu.has_sse2()) {
+ LOG(WARNING) << "System not supported. Test skipped.";
+ return;
+ }
+
+ scoped_ptr<uint8[]> src(new uint8[16]);
+ scoped_ptr<uint8[]> dst(new uint8[16]);
+
+ memset(src.get(), 0xff, 16);
+ memset(dst.get(), 0, 16);
+
+ media::FilterYUVRows_SSE2(dst.get(), src.get(), src.get(), 1, 255);
+
+ EXPECT_EQ(255u, dst[0]);
+ for (int i = 1; i < 16; ++i) {
+ EXPECT_EQ(0u, dst[i]);
+ }
+}
+
+#if defined(MEDIA_MMX_INTRINSICS_AVAILABLE)
+TEST(YUVConvertTest, FilterYUVRows_MMX_UnalignedDestination) {
+ base::CPU cpu;
+ if (!cpu.has_mmx()) {
+ LOG(WARNING) << "System not supported. Test skipped.";
+ return;
+ }
+
+ const int kSize = 32;
+ scoped_ptr<uint8[]> src(new uint8[kSize]);
+ scoped_ptr<uint8[]> dst_sample(new uint8[kSize]);
+ scoped_ptr<uint8[]> dst(new uint8[kSize]);
+
+ memset(dst_sample.get(), 0, kSize);
+ memset(dst.get(), 0, kSize);
+ for (int i = 0; i < kSize; ++i)
+ src[i] = 100 + i;
+
+ media::FilterYUVRows_C(dst_sample.get(),
+ src.get(), src.get(), 17, 128);
+
+ // Generate an unaligned output address.
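+ // Rounding dst + 8 down to an 8-byte boundary and then adding 1 yields a
+ // pointer that is in-bounds but guaranteed misaligned for MMX's 8-byte
+ // stores.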
+ uint8* dst_ptr =
+ reinterpret_cast<uint8*>(
+ (reinterpret_cast<uintptr_t>(dst.get() + 8) & ~7) + 1);
+ media::FilterYUVRows_MMX(dst_ptr, src.get(), src.get(), 17, 128);
+ media::EmptyRegisterState();
+
+ EXPECT_EQ(0, memcmp(dst_sample.get(), dst_ptr, 17));
+}
+#endif // defined(MEDIA_MMX_INTRINSICS_AVAILABLE)
+
+TEST(YUVConvertTest, FilterYUVRows_SSE2_UnalignedDestination) {
+ base::CPU cpu;
+ if (!cpu.has_sse2()) {
+ LOG(WARNING) << "System not supported. Test skipped.";
+ return;
+ }
+
+ const int kSize = 64;
+ scoped_ptr<uint8[]> src(new uint8[kSize]);
+ scoped_ptr<uint8[]> dst_sample(new uint8[kSize]);
+ scoped_ptr<uint8[]> dst(new uint8[kSize]);
+
+ memset(dst_sample.get(), 0, kSize);
+ memset(dst.get(), 0, kSize);
+ for (int i = 0; i < kSize; ++i)
+ src[i] = 100 + i;
+
+ media::FilterYUVRows_C(dst_sample.get(),
+ src.get(), src.get(), 37, 128);
+
+ // Generate an unaligned output address.
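+ // Same trick as the MMX variant: align down to 16 bytes, then add 1 to
+ // guarantee a destination that is misaligned for SSE2's 16-byte stores.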
+ uint8* dst_ptr =
+ reinterpret_cast<uint8*>(
+ (reinterpret_cast<uintptr_t>(dst.get() + 16) & ~15) + 1);
+ media::FilterYUVRows_SSE2(dst_ptr, src.get(), src.get(), 37, 128);
+ media::EmptyRegisterState();
+
+ EXPECT_EQ(0, memcmp(dst_sample.get(), dst_ptr, 37));
+}
+
+#if defined(ARCH_CPU_X86_64)
+
+TEST(YUVConvertTest, ScaleYUVToRGB32Row_SSE2_X64) {
+ scoped_ptr<uint8[]> yuv_bytes(new uint8[kYUV12Size]);
+ scoped_ptr<uint8[]> rgb_bytes_reference(new uint8[kRGBSize]);
+ scoped_ptr<uint8[]> rgb_bytes_converted(new uint8[kRGBSize]);
+ ReadYV12Data(&yuv_bytes);
+
+ const int kWidth = 167;
+ const int kSourceDx = 80000; // 16.16 fixed point; > 65536 (1.0) means a scale-down.
+ ScaleYUVToRGB32Row_C(yuv_bytes.get(),
+ yuv_bytes.get() + kSourceUOffset,
+ yuv_bytes.get() + kSourceVOffset,
+ rgb_bytes_reference.get(),
+ kWidth,
+ kSourceDx);
+ ScaleYUVToRGB32Row_SSE2_X64(yuv_bytes.get(),
+ yuv_bytes.get() + kSourceUOffset,
+ yuv_bytes.get() + kSourceVOffset,
+ rgb_bytes_converted.get(),
+ kWidth,
+ kSourceDx);
+ media::EmptyRegisterState();
+ EXPECT_EQ(0, memcmp(rgb_bytes_reference.get(),
+ rgb_bytes_converted.get(),
+ kWidth * kBpp));
+}
+
+TEST(YUVConvertTest, LinearScaleYUVToRGB32Row_MMX_X64) {
+ scoped_ptr<uint8[]> yuv_bytes(new uint8[kYUV12Size]);
+ scoped_ptr<uint8[]> rgb_bytes_reference(new uint8[kRGBSize]);
+ scoped_ptr<uint8[]> rgb_bytes_converted(new uint8[kRGBSize]);
+ ReadYV12Data(&yuv_bytes);
+
+ const int kWidth = 167;
+ const int kSourceDx = 80000; // 16.16 fixed point; > 65536 (1.0) means a scale-down.
+ LinearScaleYUVToRGB32Row_C(yuv_bytes.get(),
+ yuv_bytes.get() + kSourceUOffset,
+ yuv_bytes.get() + kSourceVOffset,
+ rgb_bytes_reference.get(),
+ kWidth,
+ kSourceDx);
+ LinearScaleYUVToRGB32Row_MMX_X64(yuv_bytes.get(),
+ yuv_bytes.get() + kSourceUOffset,
+ yuv_bytes.get() + kSourceVOffset,
+ rgb_bytes_converted.get(),
+ kWidth,
+ kSourceDx);
+ media::EmptyRegisterState();
+ EXPECT_EQ(0, memcmp(rgb_bytes_reference.get(),
+ rgb_bytes_converted.get(),
+ kWidth * kBpp));
+}
+
+#endif // defined(ARCH_CPU_X86_64)
+
+#endif // !defined(ARCH_CPU_ARM_FAMILY) && !defined(ARCH_CPU_MIPS_FAMILY)
+
+} // namespace media