summaryrefslogtreecommitdiff
path: root/chromium/media
diff options
context:
space:
mode:
authorAndras Becsi <andras.becsi@digia.com>2014-03-18 13:16:26 +0100
committerFrederik Gladhorn <frederik.gladhorn@digia.com>2014-03-20 15:55:39 +0100
commit3f0f86b0caed75241fa71c95a5d73bc0164348c5 (patch)
tree92b9fb00f2e9e90b0be2262093876d4f43b6cd13 /chromium/media
parente90d7c4b152c56919d963987e2503f9909a666d2 (diff)
downloadqtwebengine-chromium-3f0f86b0caed75241fa71c95a5d73bc0164348c5.tar.gz
Update to new stable branch 1750
This also includes an updated ninja and chromium dependencies needed on Windows. Change-Id: Icd597d80ed3fa4425933c9f1334c3c2e31291c42 Reviewed-by: Zoltan Arvai <zarvai@inf.u-szeged.hu> Reviewed-by: Zeno Albisser <zeno.albisser@digia.com>
Diffstat (limited to 'chromium/media')
-rw-r--r--chromium/media/audio/alsa/alsa_input.cc (renamed from chromium/media/audio/linux/alsa_input.cc)14
-rw-r--r--chromium/media/audio/alsa/alsa_input.h (renamed from chromium/media/audio/linux/alsa_input.h)14
-rw-r--r--chromium/media/audio/alsa/alsa_output.cc (renamed from chromium/media/audio/linux/alsa_output.cc)12
-rw-r--r--chromium/media/audio/alsa/alsa_output.h (renamed from chromium/media/audio/linux/alsa_output.h)14
-rw-r--r--chromium/media/audio/alsa/alsa_output_unittest.cc (renamed from chromium/media/audio/linux/alsa_output_unittest.cc)19
-rw-r--r--chromium/media/audio/alsa/alsa_util.cc (renamed from chromium/media/audio/linux/alsa_util.cc)6
-rw-r--r--chromium/media/audio/alsa/alsa_util.h (renamed from chromium/media/audio/linux/alsa_util.h)8
-rw-r--r--chromium/media/audio/alsa/alsa_wrapper.cc (renamed from chromium/media/audio/linux/alsa_wrapper.cc)4
-rw-r--r--chromium/media/audio/alsa/alsa_wrapper.h (renamed from chromium/media/audio/linux/alsa_wrapper.h)7
-rw-r--r--chromium/media/audio/alsa/audio_manager_alsa.cc362
-rw-r--r--chromium/media/audio/alsa/audio_manager_alsa.h (renamed from chromium/media/audio/linux/audio_manager_linux.h)16
-rw-r--r--chromium/media/audio/android/audio_android_unittest.cc202
-rw-r--r--chromium/media/audio/android/audio_manager_android.cc141
-rw-r--r--chromium/media/audio/android/audio_manager_android.h25
-rw-r--r--chromium/media/audio/android/audio_record_input.cc131
-rw-r--r--chromium/media/audio/android/audio_record_input.h72
-rw-r--r--chromium/media/audio/android/opensles_input.cc16
-rw-r--r--chromium/media/audio/android/opensles_input.h4
-rw-r--r--chromium/media/audio/android/opensles_output.cc10
-rw-r--r--chromium/media/audio/android/opensles_output.h10
-rw-r--r--chromium/media/audio/audio_device_thread.cc40
-rw-r--r--chromium/media/audio/audio_device_thread.h8
-rw-r--r--chromium/media/audio/audio_input_controller_unittest.cc10
-rw-r--r--chromium/media/audio/audio_input_device.cc8
-rw-r--r--chromium/media/audio/audio_input_unittest.cc10
-rw-r--r--chromium/media/audio/audio_input_volume_unittest.cc3
-rw-r--r--chromium/media/audio/audio_logging.h84
-rw-r--r--chromium/media/audio/audio_low_latency_input_output_unittest.cc30
-rw-r--r--chromium/media/audio/audio_manager.cc24
-rw-r--r--chromium/media/audio/audio_manager.h38
-rw-r--r--chromium/media/audio/audio_manager_base.cc109
-rw-r--r--chromium/media/audio/audio_manager_base.h26
-rw-r--r--chromium/media/audio/audio_manager_unittest.cc32
-rw-r--r--chromium/media/audio/audio_output_controller.cc123
-rw-r--r--chromium/media/audio/audio_output_controller.h40
-rw-r--r--chromium/media/audio/audio_output_controller_unittest.cc55
-rw-r--r--chromium/media/audio/audio_output_device.cc52
-rw-r--r--chromium/media/audio/audio_output_device_unittest.cc13
-rw-r--r--chromium/media/audio/audio_output_dispatcher.cc6
-rw-r--r--chromium/media/audio/audio_output_dispatcher.h17
-rw-r--r--chromium/media/audio/audio_output_dispatcher_impl.cc172
-rw-r--r--chromium/media/audio/audio_output_dispatcher_impl.h53
-rw-r--r--chromium/media/audio/audio_output_proxy_unittest.cc336
-rw-r--r--chromium/media/audio/audio_output_resampler.cc124
-rw-r--r--chromium/media/audio/audio_output_resampler.h6
-rw-r--r--chromium/media/audio/audio_parameters.cc34
-rw-r--r--chromium/media/audio/audio_parameters.h29
-rw-r--r--chromium/media/audio/audio_power_monitor.cc30
-rw-r--r--chromium/media/audio/audio_util.cc99
-rw-r--r--chromium/media/audio/audio_util.h31
-rw-r--r--chromium/media/audio/cras/audio_manager_cras.cc6
-rw-r--r--chromium/media/audio/cras/audio_manager_cras.h2
-rw-r--r--chromium/media/audio/cras/cras_input.cc2
-rw-r--r--chromium/media/audio/cras/cras_unified.cc36
-rw-r--r--chromium/media/audio/fake_audio_log_factory.cc32
-rw-r--r--chromium/media/audio/fake_audio_log_factory.h29
-rw-r--r--chromium/media/audio/fake_audio_manager.cc83
-rw-r--r--chromium/media/audio/fake_audio_manager.h53
-rw-r--r--chromium/media/audio/fake_audio_output_stream.cc4
-rw-r--r--chromium/media/audio/linux/audio_manager_linux.cc362
-rw-r--r--chromium/media/audio/mac/audio_auhal_mac.cc155
-rw-r--r--chromium/media/audio/mac/audio_auhal_mac.h27
-rw-r--r--chromium/media/audio/mac/audio_auhal_mac_unittest.cc2
-rw-r--r--chromium/media/audio/mac/audio_input_mac.cc1
-rw-r--r--chromium/media/audio/mac/audio_low_latency_input_mac.cc5
-rw-r--r--chromium/media/audio/mac/audio_low_latency_input_mac_unittest.cc2
-rw-r--r--chromium/media/audio/mac/audio_manager_mac.cc83
-rw-r--r--chromium/media/audio/mac/audio_manager_mac.h12
-rw-r--r--chromium/media/audio/mac/audio_synchronized_mac.cc1
-rw-r--r--chromium/media/audio/mac/audio_unified_mac.cc1
-rw-r--r--chromium/media/audio/mock_audio_manager.cc16
-rw-r--r--chromium/media/audio/mock_audio_manager.h10
-rw-r--r--chromium/media/audio/openbsd/audio_manager_openbsd.cc12
-rw-r--r--chromium/media/audio/openbsd/audio_manager_openbsd.h2
-rw-r--r--chromium/media/audio/pulse/audio_manager_pulse.cc16
-rw-r--r--chromium/media/audio/pulse/audio_manager_pulse.h4
-rw-r--r--chromium/media/audio/shared_memory_util.cc72
-rw-r--r--chromium/media/audio/shared_memory_util.h39
-rw-r--r--chromium/media/audio/simple_sources.cc1
-rw-r--r--chromium/media/audio/sounds/audio_stream_handler.cc188
-rw-r--r--chromium/media/audio/sounds/audio_stream_handler.h76
-rw-r--r--chromium/media/audio/sounds/audio_stream_handler_unittest.cc108
-rw-r--r--chromium/media/audio/sounds/sounds_manager.cc150
-rw-r--r--chromium/media/audio/sounds/sounds_manager.h56
-rw-r--r--chromium/media/audio/sounds/sounds_manager_unittest.cc69
-rw-r--r--chromium/media/audio/sounds/test_data.cc34
-rw-r--r--chromium/media/audio/sounds/test_data.h51
-rw-r--r--chromium/media/audio/sounds/wav_audio_handler.cc141
-rw-r--r--chromium/media/audio/sounds/wav_audio_handler.h59
-rw-r--r--chromium/media/audio/sounds/wav_audio_handler_unittest.cc33
-rw-r--r--chromium/media/audio/test_audio_input_controller_factory.cc5
-rw-r--r--chromium/media/audio/test_audio_input_controller_factory.h8
-rw-r--r--chromium/media/audio/win/audio_device_listener_win.cc1
-rw-r--r--chromium/media/audio/win/audio_low_latency_input_win.cc1
-rw-r--r--chromium/media/audio/win/audio_low_latency_input_win_unittest.cc31
-rw-r--r--chromium/media/audio/win/audio_low_latency_output_win.cc184
-rw-r--r--chromium/media/audio/win/audio_low_latency_output_win.h29
-rw-r--r--chromium/media/audio/win/audio_low_latency_output_win_unittest.cc49
-rw-r--r--chromium/media/audio/win/audio_manager_win.cc87
-rw-r--r--chromium/media/audio/win/audio_manager_win.h4
-rw-r--r--chromium/media/audio/win/audio_output_win_unittest.cc65
-rw-r--r--chromium/media/audio/win/audio_unified_win.cc30
-rw-r--r--chromium/media/audio/win/audio_unified_win_unittest.cc17
-rw-r--r--chromium/media/audio/win/core_audio_util_win.cc58
-rw-r--r--chromium/media/audio/win/core_audio_util_win.h38
-rw-r--r--chromium/media/audio/win/core_audio_util_win_unittest.cc4
-rw-r--r--chromium/media/audio/win/wavein_input_win.cc1
-rw-r--r--chromium/media/audio/win/waveout_output_win.cc24
-rw-r--r--chromium/media/base/android/audio_decoder_job.cc18
-rw-r--r--chromium/media/base/android/audio_decoder_job.h8
-rw-r--r--chromium/media/base/android/demuxer_android.h45
-rw-r--r--chromium/media/base/android/media_codec_bridge.cc405
-rw-r--r--chromium/media/base/android/media_codec_bridge.h134
-rw-r--r--chromium/media/base/android/media_codec_bridge_unittest.cc78
-rw-r--r--chromium/media/base/android/media_decoder_job.cc138
-rw-r--r--chromium/media/base/android/media_decoder_job.h47
-rw-r--r--chromium/media/base/android/media_drm_bridge.cc89
-rw-r--r--chromium/media/base/android/media_drm_bridge.h49
-rw-r--r--chromium/media/base/android/media_player_android.cc44
-rw-r--r--chromium/media/base/android/media_player_android.h23
-rw-r--r--chromium/media/base/android/media_player_bridge.cc109
-rw-r--r--chromium/media/base/android/media_player_bridge.h29
-rw-r--r--chromium/media/base/android/media_player_manager.h50
-rw-r--r--chromium/media/base/android/media_source_player.cc412
-rw-r--r--chromium/media/base/android/media_source_player.h91
-rw-r--r--chromium/media/base/android/media_source_player_unittest.cc2000
-rw-r--r--chromium/media/base/android/video_decoder_job.cc20
-rw-r--r--chromium/media/base/android/video_decoder_job.h8
-rw-r--r--chromium/media/base/audio_bus.cc11
-rw-r--r--chromium/media/base/audio_bus.h1
-rw-r--r--chromium/media/base/audio_bus_perftest.cc53
-rw-r--r--chromium/media/base/audio_bus_unittest.cc56
-rw-r--r--chromium/media/base/audio_converter.cc6
-rw-r--r--chromium/media/base/audio_converter_perftest.cc79
-rw-r--r--chromium/media/base/audio_converter_unittest.cc108
-rw-r--r--chromium/media/base/audio_decoder_config.h1
-rw-r--r--chromium/media/base/audio_renderer_mixer_unittest.cc15
-rw-r--r--chromium/media/base/bind_to_loop.h4
-rw-r--r--chromium/media/base/bind_to_loop.h.pump4
-rw-r--r--chromium/media/base/bit_reader.cc2
-rw-r--r--chromium/media/base/channel_mixer_unittest.cc18
-rw-r--r--chromium/media/base/container_names_unittest.cc4
-rw-r--r--chromium/media/base/data_source.cc2
-rw-r--r--chromium/media/base/data_source.h3
-rw-r--r--chromium/media/base/demuxer.cc14
-rw-r--r--chromium/media/base/demuxer.h26
-rw-r--r--chromium/media/base/demuxer_perftest.cc (renamed from chromium/media/tools/demuxer_bench/demuxer_bench.cc)146
-rw-r--r--chromium/media/base/demuxer_stream.h1
-rw-r--r--chromium/media/base/fake_audio_renderer_sink.cc86
-rw-r--r--chromium/media/base/fake_audio_renderer_sink.h61
-rw-r--r--chromium/media/base/fake_text_track_stream.cc83
-rw-r--r--chromium/media/base/fake_text_track_stream.h47
-rw-r--r--chromium/media/base/filter_collection.cc10
-rw-r--r--chromium/media/base/filter_collection.h5
-rw-r--r--chromium/media/base/media_file_checker.cc4
-rw-r--r--chromium/media/base/media_keys.h60
-rw-r--r--chromium/media/base/media_log.cc2
-rw-r--r--chromium/media/base/media_log_event.h3
-rw-r--r--chromium/media/base/media_switches.cc24
-rw-r--r--chromium/media/base/media_switches.h10
-rw-r--r--chromium/media/base/mock_demuxer_host.h6
-rw-r--r--chromium/media/base/mock_filters.cc4
-rw-r--r--chromium/media/base/mock_filters.h19
-rw-r--r--chromium/media/base/pipeline.cc88
-rw-r--r--chromium/media/base/pipeline.h22
-rw-r--r--chromium/media/base/pipeline_status.cc24
-rw-r--r--chromium/media/base/pipeline_status.h5
-rw-r--r--chromium/media/base/pipeline_unittest.cc119
-rw-r--r--chromium/media/base/run_all_unittests.cc6
-rw-r--r--chromium/media/base/simd/vector_math_sse.cc79
-rw-r--r--chromium/media/base/sinc_resampler.cc17
-rw-r--r--chromium/media/base/sinc_resampler.h8
-rw-r--r--chromium/media/base/sinc_resampler_perftest.cc76
-rw-r--r--chromium/media/base/sinc_resampler_unittest.cc77
-rw-r--r--chromium/media/base/stream_parser.h15
-rw-r--r--chromium/media/base/test_data_util.cc4
-rw-r--r--chromium/media/base/text_cue.cc23
-rw-r--r--chromium/media/base/text_cue.h48
-rw-r--r--chromium/media/base/text_renderer.cc369
-rw-r--r--chromium/media/base/text_renderer.h145
-rw-r--r--chromium/media/base/text_renderer_unittest.cc1382
-rw-r--r--chromium/media/base/text_track.h19
-rw-r--r--chromium/media/base/text_track_config.cc30
-rw-r--r--chromium/media/base/text_track_config.h48
-rw-r--r--chromium/media/base/user_input_monitor_linux.cc41
-rw-r--r--chromium/media/base/user_input_monitor_win.cc26
-rw-r--r--chromium/media/base/vector_math.cc97
-rw-r--r--chromium/media/base/vector_math.h12
-rw-r--r--chromium/media/base/vector_math_perftest.cc181
-rw-r--r--chromium/media/base/vector_math_testing.h8
-rw-r--r--chromium/media/base/vector_math_unittest.cc390
-rw-r--r--chromium/media/base/video_decoder_config.cc4
-rw-r--r--chromium/media/base/video_frame.cc193
-rw-r--r--chromium/media/base/video_frame.h91
-rw-r--r--chromium/media/base/video_frame_pool.cc128
-rw-r--r--chromium/media/base/video_frame_pool.h50
-rw-r--r--chromium/media/base/video_frame_pool_unittest.cc73
-rw-r--r--chromium/media/base/video_frame_unittest.cc235
-rw-r--r--chromium/media/base/video_renderer.h9
-rw-r--r--chromium/media/base/video_util.cc1
-rw-r--r--chromium/media/base/yuv_convert_unittest.cc16
-rw-r--r--chromium/media/cast/DEPS6
-rw-r--r--chromium/media/cast/OWNERS2
-rw-r--r--chromium/media/cast/README4
-rw-r--r--chromium/media/cast/audio_receiver/audio_decoder.cc90
-rw-r--r--chromium/media/cast/audio_receiver/audio_decoder.h43
-rw-r--r--chromium/media/cast/audio_receiver/audio_decoder_unittest.cc99
-rw-r--r--chromium/media/cast/audio_receiver/audio_receiver.cc434
-rw-r--r--chromium/media/cast/audio_receiver/audio_receiver.gypi7
-rw-r--r--chromium/media/cast/audio_receiver/audio_receiver.h80
-rw-r--r--chromium/media/cast/audio_receiver/audio_receiver_unittest.cc166
-rw-r--r--chromium/media/cast/audio_sender/audio_encoder.cc378
-rw-r--r--chromium/media/cast/audio_sender/audio_encoder.h67
-rw-r--r--chromium/media/cast/audio_sender/audio_encoder_unittest.cc222
-rw-r--r--chromium/media/cast/audio_sender/audio_sender.cc160
-rw-r--r--chromium/media/cast/audio_sender/audio_sender.gypi8
-rw-r--r--chromium/media/cast/audio_sender/audio_sender.h53
-rw-r--r--chromium/media/cast/audio_sender/audio_sender_unittest.cc79
-rw-r--r--chromium/media/cast/cast.gyp124
-rw-r--r--chromium/media/cast/cast_config.cc5
-rw-r--r--chromium/media/cast/cast_config.h61
-rw-r--r--chromium/media/cast/cast_defines.h89
-rw-r--r--chromium/media/cast/cast_environment.cc101
-rw-r--r--chromium/media/cast/cast_environment.h (renamed from chromium/media/cast/cast_thread.h)41
-rw-r--r--chromium/media/cast/cast_receiver.gyp17
-rw-r--r--chromium/media/cast/cast_receiver.h55
-rw-r--r--chromium/media/cast/cast_receiver_impl.cc175
-rw-r--r--chromium/media/cast/cast_receiver_impl.h50
-rw-r--r--chromium/media/cast/cast_sender.gyp7
-rw-r--r--chromium/media/cast/cast_sender.h38
-rw-r--r--chromium/media/cast/cast_sender_impl.cc98
-rw-r--r--chromium/media/cast/cast_sender_impl.h19
-rw-r--r--chromium/media/cast/cast_thread.cc64
-rw-r--r--chromium/media/cast/congestion_control/congestion_control.cc13
-rw-r--r--chromium/media/cast/congestion_control/congestion_control.gypi1
-rw-r--r--chromium/media/cast/congestion_control/congestion_control.h15
-rw-r--r--chromium/media/cast/congestion_control/congestion_control_unittest.cc77
-rw-r--r--chromium/media/cast/framer/cast_message_builder.cc27
-rw-r--r--chromium/media/cast/framer/cast_message_builder.h19
-rw-r--r--chromium/media/cast/framer/cast_message_builder_unittest.cc89
-rw-r--r--chromium/media/cast/framer/frame_buffer.cc12
-rw-r--r--chromium/media/cast/framer/frame_buffer.h14
-rw-r--r--chromium/media/cast/framer/frame_buffer_unittest.cc4
-rw-r--r--chromium/media/cast/framer/frame_id_map.cc40
-rw-r--r--chromium/media/cast/framer/frame_id_map.h38
-rw-r--r--chromium/media/cast/framer/framer.cc43
-rw-r--r--chromium/media/cast/framer/framer.h33
-rw-r--r--chromium/media/cast/framer/framer_unittest.cc146
-rw-r--r--chromium/media/cast/logging/logging_defines.cc101
-rw-r--r--chromium/media/cast/logging/logging_defines.h130
-rw-r--r--chromium/media/cast/logging/logging_impl.cc225
-rw-r--r--chromium/media/cast/logging/logging_impl.h78
-rw-r--r--chromium/media/cast/logging/logging_internal.cc79
-rw-r--r--chromium/media/cast/logging/logging_internal.h95
-rw-r--r--chromium/media/cast/logging/logging_raw.cc143
-rw-r--r--chromium/media/cast/logging/logging_raw.h85
-rw-r--r--chromium/media/cast/logging/logging_stats.cc150
-rw-r--r--chromium/media/cast/logging/logging_stats.h75
-rw-r--r--chromium/media/cast/logging/logging_unittest.cc248
-rw-r--r--chromium/media/cast/net/cast_net_defines.h81
-rw-r--r--chromium/media/cast/net/frame_id_wrap_helper_test.cc48
-rw-r--r--chromium/media/cast/net/pacing/mock_paced_packet_sender.cc17
-rw-r--r--chromium/media/cast/net/pacing/mock_paced_packet_sender.h27
-rw-r--r--chromium/media/cast/net/pacing/paced_sender.cc148
-rw-r--r--chromium/media/cast/net/pacing/paced_sender.gyp (renamed from chromium/media/cast/pacing/paced_sender.gyp)3
-rw-r--r--chromium/media/cast/net/pacing/paced_sender.h (renamed from chromium/media/cast/pacing/paced_sender.h)53
-rw-r--r--chromium/media/cast/net/pacing/paced_sender_unittest.cc257
-rw-r--r--chromium/media/cast/net/rtp_sender/mock_rtp_sender.h (renamed from chromium/media/cast/rtp_sender/mock_rtp_sender.h)4
-rw-r--r--chromium/media/cast/net/rtp_sender/packet_storage/packet_storage.cc (renamed from chromium/media/cast/rtp_sender/packet_storage/packet_storage.cc)69
-rw-r--r--chromium/media/cast/net/rtp_sender/packet_storage/packet_storage.gyp (renamed from chromium/media/cast/rtp_sender/packet_storage/packet_storage.gypi)0
-rw-r--r--chromium/media/cast/net/rtp_sender/packet_storage/packet_storage.h (renamed from chromium/media/cast/rtp_sender/packet_storage/packet_storage.h)30
-rw-r--r--chromium/media/cast/net/rtp_sender/packet_storage/packet_storage_unittest.cc (renamed from chromium/media/cast/rtp_sender/packet_storage/packet_storage_unittest.cc)70
-rw-r--r--chromium/media/cast/net/rtp_sender/rtp_packetizer/rtp_packetizer.cc (renamed from chromium/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer.cc)70
-rw-r--r--chromium/media/cast/net/rtp_sender/rtp_packetizer/rtp_packetizer.gyp (renamed from chromium/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer.gypi)4
-rw-r--r--chromium/media/cast/net/rtp_sender/rtp_packetizer/rtp_packetizer.h (renamed from chromium/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer.h)22
-rw-r--r--chromium/media/cast/net/rtp_sender/rtp_packetizer/rtp_packetizer_config.cc21
-rw-r--r--chromium/media/cast/net/rtp_sender/rtp_packetizer/rtp_packetizer_config.h (renamed from chromium/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer_config.h)18
-rw-r--r--chromium/media/cast/net/rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc (renamed from chromium/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc)94
-rw-r--r--chromium/media/cast/net/rtp_sender/rtp_sender.cc (renamed from chromium/media/cast/rtp_sender/rtp_sender.cc)72
-rw-r--r--chromium/media/cast/net/rtp_sender/rtp_sender.gyp (renamed from chromium/media/cast/rtp_sender/rtp_sender.gyp)5
-rw-r--r--chromium/media/cast/net/rtp_sender/rtp_sender.h (renamed from chromium/media/cast/rtp_sender/rtp_sender.h)30
-rw-r--r--chromium/media/cast/pacing/mock_paced_packet_sender.h26
-rw-r--r--chromium/media/cast/pacing/mock_packet_sender.h22
-rw-r--r--chromium/media/cast/pacing/paced_sender.cc123
-rw-r--r--chromium/media/cast/pacing/paced_sender_unittest.cc265
-rw-r--r--chromium/media/cast/rtcp/mock_rtcp_receiver_feedback.cc23
-rw-r--r--chromium/media/cast/rtcp/mock_rtcp_receiver_feedback.h11
-rw-r--r--chromium/media/cast/rtcp/mock_rtcp_sender_feedback.cc17
-rw-r--r--chromium/media/cast/rtcp/mock_rtcp_sender_feedback.h13
-rw-r--r--chromium/media/cast/rtcp/rtcp.cc335
-rw-r--r--chromium/media/cast/rtcp/rtcp.h71
-rw-r--r--chromium/media/cast/rtcp/rtcp_defines.h42
-rw-r--r--chromium/media/cast/rtcp/rtcp_receiver.cc242
-rw-r--r--chromium/media/cast/rtcp/rtcp_receiver.h21
-rw-r--r--chromium/media/cast/rtcp/rtcp_receiver_unittest.cc280
-rw-r--r--chromium/media/cast/rtcp/rtcp_sender.cc403
-rw-r--r--chromium/media/cast/rtcp/rtcp_sender.h29
-rw-r--r--chromium/media/cast/rtcp/rtcp_sender_unittest.cc455
-rw-r--r--chromium/media/cast/rtcp/rtcp_unittest.cc363
-rw-r--r--chromium/media/cast/rtcp/rtcp_utility.cc149
-rw-r--r--chromium/media/cast/rtcp/rtcp_utility.h48
-rw-r--r--chromium/media/cast/rtcp/test_rtcp_packet_builder.cc63
-rw-r--r--chromium/media/cast/rtcp/test_rtcp_packet_builder.h27
-rw-r--r--chromium/media/cast/rtp_receiver/mock_rtp_payload_feedback.cc17
-rw-r--r--chromium/media/cast/rtp_receiver/mock_rtp_payload_feedback.h (renamed from chromium/media/cast/rtp_common/mock_rtp_payload_feedback.h)5
-rw-r--r--chromium/media/cast/rtp_receiver/receiver_stats.cc16
-rw-r--r--chromium/media/cast/rtp_receiver/receiver_stats.h14
-rw-r--r--chromium/media/cast/rtp_receiver/receiver_stats_unittest.cc43
-rw-r--r--chromium/media/cast/rtp_receiver/rtp_parser/include/mock/mock_rtp_feedback.h2
-rw-r--r--chromium/media/cast/rtp_receiver/rtp_parser/rtp_parser.cc36
-rw-r--r--chromium/media/cast/rtp_receiver/rtp_parser/rtp_parser.gyp (renamed from chromium/media/cast/rtp_receiver/rtp_parser/rtp_parser.gypi)2
-rw-r--r--chromium/media/cast/rtp_receiver/rtp_parser/rtp_parser.h11
-rw-r--r--chromium/media/cast/rtp_receiver/rtp_parser/rtp_parser_unittest.cc17
-rw-r--r--chromium/media/cast/rtp_receiver/rtp_receiver.cc21
-rw-r--r--chromium/media/cast/rtp_receiver/rtp_receiver.gyp3
-rw-r--r--chromium/media/cast/rtp_receiver/rtp_receiver.h11
-rw-r--r--chromium/media/cast/rtp_receiver/rtp_receiver_defines.h (renamed from chromium/media/cast/rtp_common/rtp_defines.h)12
-rw-r--r--chromium/media/cast/test/transport/transport.gyp22
-rw-r--r--chromium/media/cast/test/utility/utility.gyp28
-rw-r--r--chromium/media/cast/video_receiver/codecs/vp8/vp8_decoder.cc98
-rw-r--r--chromium/media/cast/video_receiver/codecs/vp8/vp8_decoder.gyp1
-rw-r--r--chromium/media/cast/video_receiver/codecs/vp8/vp8_decoder.h27
-rw-r--r--chromium/media/cast/video_receiver/video_decoder.cc42
-rw-r--r--chromium/media/cast/video_receiver/video_decoder.h29
-rw-r--r--chromium/media/cast/video_receiver/video_decoder_unittest.cc96
-rw-r--r--chromium/media/cast/video_receiver/video_receiver.cc412
-rw-r--r--chromium/media/cast/video_receiver/video_receiver.gypi1
-rw-r--r--chromium/media/cast/video_receiver/video_receiver.h77
-rw-r--r--chromium/media/cast/video_receiver/video_receiver_unittest.cc93
-rw-r--r--chromium/media/cast/video_sender/codecs/vp8/vp8_encoder.cc46
-rw-r--r--chromium/media/cast/video_sender/codecs/vp8/vp8_encoder.gypi1
-rw-r--r--chromium/media/cast/video_sender/codecs/vp8/vp8_encoder.h16
-rw-r--r--chromium/media/cast/video_sender/mock_video_encoder_controller.cc17
-rw-r--r--chromium/media/cast/video_sender/mock_video_encoder_controller.h5
-rw-r--r--chromium/media/cast/video_sender/video_encoder.cc53
-rw-r--r--chromium/media/cast/video_sender/video_encoder.h31
-rw-r--r--chromium/media/cast/video_sender/video_encoder_unittest.cc116
-rw-r--r--chromium/media/cast/video_sender/video_sender.cc303
-rw-r--r--chromium/media/cast/video_sender/video_sender.gypi5
-rw-r--r--chromium/media/cast/video_sender/video_sender.h53
-rw-r--r--chromium/media/cast/video_sender/video_sender_unittest.cc132
-rw-r--r--chromium/media/cdm/aes_decryptor.cc359
-rw-r--r--chromium/media/cdm/aes_decryptor.h67
-rw-r--r--chromium/media/cdm/aes_decryptor_unittest.cc482
-rw-r--r--chromium/media/cdm/json_web_key.cc163
-rw-r--r--chromium/media/cdm/json_web_key.h47
-rw-r--r--chromium/media/cdm/json_web_key_unittest.cc186
-rw-r--r--chromium/media/cdm/key_system_names.cc27
-rw-r--r--chromium/media/cdm/key_system_names.h33
-rw-r--r--chromium/media/cdm/ppapi/api/OWNERS2
-rw-r--r--chromium/media/cdm/ppapi/api/codereview.settings5
-rw-r--r--chromium/media/cdm/ppapi/api/content_decryption_module.h615
-rw-r--r--chromium/media/cdm/ppapi/cdm_adapter.cc1041
-rw-r--r--chromium/media/cdm/ppapi/cdm_adapter.h220
-rw-r--r--chromium/media/cdm/ppapi/cdm_helpers.cc102
-rw-r--r--chromium/media/cdm/ppapi/cdm_helpers.h230
-rw-r--r--chromium/media/cdm/ppapi/cdm_logging.cc137
-rw-r--r--chromium/media/cdm/ppapi/cdm_logging.h66
-rw-r--r--chromium/media/cdm/ppapi/cdm_video_decoder.cc2
-rw-r--r--chromium/media/cdm/ppapi/cdm_video_decoder.h3
-rw-r--r--chromium/media/cdm/ppapi/cdm_wrapper.cc1196
-rw-r--r--chromium/media/cdm/ppapi/cdm_wrapper.h490
-rw-r--r--chromium/media/cdm/ppapi/clear_key_cdm.cc194
-rw-r--r--chromium/media/cdm/ppapi/clear_key_cdm.h72
-rw-r--r--chromium/media/cdm/ppapi/clear_key_cdm_common.h18
-rw-r--r--chromium/media/cdm/ppapi/ffmpeg_cdm_audio_decoder.cc193
-rw-r--r--chromium/media/cdm/ppapi/ffmpeg_cdm_audio_decoder.h18
-rw-r--r--chromium/media/cdm/ppapi/ffmpeg_cdm_video_decoder.cc37
-rw-r--r--chromium/media/cdm/ppapi/ffmpeg_cdm_video_decoder.h14
-rw-r--r--chromium/media/cdm/ppapi/libvpx_cdm_video_decoder.cc4
-rw-r--r--chromium/media/cdm/ppapi/libvpx_cdm_video_decoder.h4
-rw-r--r--chromium/media/cdm/ppapi/linked_ptr.h1
-rw-r--r--chromium/media/cdm/ppapi/supported_cdm_versions.h54
-rw-r--r--chromium/media/ffmpeg/ffmpeg_common.cc43
-rw-r--r--chromium/media/ffmpeg/ffmpeg_common.h22
-rw-r--r--chromium/media/ffmpeg/ffmpeg_unittest.cc8
-rw-r--r--chromium/media/filters/audio_decoder_selector.cc108
-rw-r--r--chromium/media/filters/audio_decoder_selector.h10
-rw-r--r--chromium/media/filters/audio_decoder_selector_unittest.cc92
-rw-r--r--chromium/media/filters/audio_file_reader.cc11
-rw-r--r--chromium/media/filters/audio_renderer_algorithm.cc22
-rw-r--r--chromium/media/filters/audio_renderer_algorithm_unittest.cc7
-rw-r--r--chromium/media/filters/audio_renderer_impl.cc289
-rw-r--r--chromium/media/filters/audio_renderer_impl.h85
-rw-r--r--chromium/media/filters/audio_renderer_impl_unittest.cc301
-rw-r--r--chromium/media/filters/chunk_demuxer.cc322
-rw-r--r--chromium/media/filters/chunk_demuxer.h27
-rw-r--r--chromium/media/filters/chunk_demuxer_unittest.cc219
-rw-r--r--chromium/media/filters/decrypting_demuxer_stream.cc77
-rw-r--r--chromium/media/filters/decrypting_demuxer_stream.h15
-rw-r--r--chromium/media/filters/decrypting_demuxer_stream_unittest.cc172
-rw-r--r--chromium/media/filters/decrypting_video_decoder.cc10
-rw-r--r--chromium/media/filters/decrypting_video_decoder_unittest.cc6
-rw-r--r--chromium/media/filters/fake_demuxer_stream.cc24
-rw-r--r--chromium/media/filters/fake_video_decoder.cc2
-rw-r--r--chromium/media/filters/fake_video_decoder_unittest.cc8
-rw-r--r--chromium/media/filters/ffmpeg_audio_decoder.cc83
-rw-r--r--chromium/media/filters/ffmpeg_audio_decoder.h8
-rw-r--r--chromium/media/filters/ffmpeg_demuxer.cc144
-rw-r--r--chromium/media/filters/ffmpeg_demuxer.h18
-rw-r--r--chromium/media/filters/ffmpeg_demuxer_unittest.cc134
-rw-r--r--chromium/media/filters/ffmpeg_video_decoder.cc47
-rw-r--r--chromium/media/filters/ffmpeg_video_decoder.h10
-rw-r--r--chromium/media/filters/ffmpeg_video_decoder_unittest.cc18
-rw-r--r--chromium/media/filters/gpu_video_accelerator_factories.h1
-rw-r--r--chromium/media/filters/gpu_video_decoder.cc66
-rw-r--r--chromium/media/filters/gpu_video_decoder.h40
-rw-r--r--chromium/media/filters/mock_gpu_video_accelerator_factories.h3
-rw-r--r--chromium/media/filters/opus_audio_decoder.cc363
-rw-r--r--chromium/media/filters/opus_audio_decoder.h24
-rw-r--r--chromium/media/filters/pipeline_integration_perftest.cc92
-rw-r--r--chromium/media/filters/pipeline_integration_test.cc325
-rw-r--r--chromium/media/filters/pipeline_integration_test_base.cc57
-rw-r--r--chromium/media/filters/pipeline_integration_test_base.h22
-rw-r--r--chromium/media/filters/skcanvas_video_renderer.cc11
-rw-r--r--chromium/media/filters/source_buffer_stream.cc586
-rw-r--r--chromium/media/filters/source_buffer_stream.h94
-rw-r--r--chromium/media/filters/source_buffer_stream_unittest.cc273
-rw-r--r--chromium/media/filters/stream_parser_factory.cc40
-rw-r--r--chromium/media/filters/video_decoder_selector.cc4
-rw-r--r--chromium/media/filters/video_frame_stream.cc124
-rw-r--r--chromium/media/filters/video_frame_stream.h3
-rw-r--r--chromium/media/filters/video_frame_stream_unittest.cc59
-rw-r--r--chromium/media/filters/video_renderer_impl.cc (renamed from chromium/media/filters/video_renderer_base.cc)139
-rw-r--r--chromium/media/filters/video_renderer_impl.h (renamed from chromium/media/filters/video_renderer_base.h)38
-rw-r--r--chromium/media/filters/video_renderer_impl_unittest.cc (renamed from chromium/media/filters/video_renderer_base_unittest.cc)149
-rw-r--r--chromium/media/filters/vpx_video_decoder.cc32
-rw-r--r--chromium/media/filters/vpx_video_decoder.h3
-rw-r--r--chromium/media/filters/webvtt_util.h30
-rw-r--r--chromium/media/media.gyp215
-rw-r--r--chromium/media/media_cdm.gypi12
-rw-r--r--chromium/media/media_perftests.isolate14
-rw-r--r--chromium/media/midi/midi_manager.cc28
-rw-r--r--chromium/media/midi/midi_manager.h34
-rw-r--r--chromium/media/midi/midi_manager_mac.cc63
-rw-r--r--chromium/media/midi/midi_manager_mac.h18
-rw-r--r--chromium/media/midi/midi_manager_win.cc597
-rw-r--r--chromium/media/midi/midi_manager_win.h40
-rw-r--r--chromium/media/midi/midi_message_queue.cc119
-rw-r--r--chromium/media/midi/midi_message_queue.h72
-rw-r--r--chromium/media/midi/midi_message_queue_unittest.cc173
-rw-r--r--chromium/media/midi/midi_message_util.cc34
-rw-r--r--chromium/media/midi/midi_message_util.h25
-rw-r--r--chromium/media/midi/midi_message_util_unittest.cc34
-rw-r--r--chromium/media/mp2t/es_parser_adts.cc17
-rw-r--r--chromium/media/mp2t/es_parser_adts.h7
-rw-r--r--chromium/media/mp2t/es_parser_h264.cc22
-rw-r--r--chromium/media/mp2t/mp2t_stream_parser.cc20
-rw-r--r--chromium/media/mp2t/mp2t_stream_parser.h7
-rw-r--r--chromium/media/mp2t/mp2t_stream_parser_unittest.cc25
-rw-r--r--chromium/media/mp3/mp3_stream_parser.cc7
-rw-r--r--chromium/media/mp3/mp3_stream_parser.h1
-rw-r--r--chromium/media/mp3/mp3_stream_parser_unittest.cc22
-rw-r--r--chromium/media/mp4/box_definitions.cc47
-rw-r--r--chromium/media/mp4/box_definitions.h24
-rw-r--r--chromium/media/mp4/fourccs.h1
-rw-r--r--chromium/media/mp4/mp4_stream_parser.cc8
-rw-r--r--chromium/media/mp4/mp4_stream_parser.h1
-rw-r--r--chromium/media/mp4/mp4_stream_parser_unittest.cc21
-rw-r--r--chromium/media/mp4/track_run_iterator.cc23
-rw-r--r--chromium/media/shared_memory_support.gypi2
-rw-r--r--chromium/media/tools/layout_tests/layouttest_analyzer_helpers.py4
-rw-r--r--chromium/media/tools/player_x11/gl_video_renderer.cc10
-rw-r--r--chromium/media/tools/player_x11/player_x11.cc10
-rw-r--r--chromium/media/tools/player_x11/x11_video_renderer.cc6
-rw-r--r--chromium/media/video/capture/android/video_capture_device_android.cc134
-rw-r--r--chromium/media/video/capture/android/video_capture_device_android.h24
-rw-r--r--chromium/media/video/capture/fake_video_capture_device.cc252
-rw-r--r--chromium/media/video/capture/fake_video_capture_device.h66
-rw-r--r--chromium/media/video/capture/file_video_capture_device.cc300
-rw-r--r--chromium/media/video/capture/file_video_capture_device.h79
-rw-r--r--chromium/media/video/capture/linux/video_capture_device_linux.cc206
-rw-r--r--chromium/media/video/capture/linux/video_capture_device_linux.h28
-rw-r--r--chromium/media/video/capture/mac/avfoundation_glue.h158
-rw-r--r--chromium/media/video/capture/mac/avfoundation_glue.mm161
-rw-r--r--chromium/media/video/capture/mac/coremedia_glue.h46
-rw-r--r--chromium/media/video/capture/mac/coremedia_glue.mm70
-rw-r--r--chromium/media/video/capture/mac/platform_video_capturing_mac.h50
-rw-r--r--chromium/media/video/capture/mac/video_capture_device_avfoundation_mac.h113
-rw-r--r--chromium/media/video/capture/mac/video_capture_device_avfoundation_mac.mm246
-rw-r--r--chromium/media/video/capture/mac/video_capture_device_mac.h28
-rw-r--r--chromium/media/video/capture/mac/video_capture_device_mac.mm209
-rw-r--r--chromium/media/video/capture/mac/video_capture_device_qtkit_mac.h22
-rw-r--r--chromium/media/video/capture/mac/video_capture_device_qtkit_mac.mm65
-rw-r--r--chromium/media/video/capture/video_capture.h16
-rw-r--r--chromium/media/video/capture/video_capture_device.cc28
-rw-r--r--chromium/media/video/capture/video_capture_device.h179
-rw-r--r--chromium/media/video/capture/video_capture_device_unittest.cc354
-rw-r--r--chromium/media/video/capture/video_capture_proxy.cc22
-rw-r--r--chromium/media/video/capture/video_capture_proxy.h10
-rw-r--r--chromium/media/video/capture/video_capture_types.cc56
-rw-r--r--chromium/media/video/capture/video_capture_types.h56
-rw-r--r--chromium/media/video/capture/win/capability_list_win.cc10
-rw-r--r--chromium/media/video/capture/win/capability_list_win.h9
-rw-r--r--chromium/media/video/capture/win/sink_filter_win.cc9
-rw-r--r--chromium/media/video/capture/win/sink_filter_win.h7
-rw-r--r--chromium/media/video/capture/win/sink_input_pin_win.cc62
-rw-r--r--chromium/media/video/capture/win/sink_input_pin_win.h8
-rw-r--r--chromium/media/video/capture/win/video_capture_device_mf_win.cc105
-rw-r--r--chromium/media/video/capture/win/video_capture_device_mf_win.h19
-rw-r--r--chromium/media/video/capture/win/video_capture_device_win.cc158
-rw-r--r--chromium/media/video/capture/win/video_capture_device_win.h16
-rw-r--r--chromium/media/video/video_decode_accelerator.h6
-rw-r--r--chromium/media/webm/chromeos/ebml_writer.cc2
-rw-r--r--chromium/media/webm/chromeos/webm_encoder.cc8
-rw-r--r--chromium/media/webm/tracks_builder.cc9
-rw-r--r--chromium/media/webm/tracks_builder.h11
-rw-r--r--chromium/media/webm/webm_cluster_parser.cc29
-rw-r--r--chromium/media/webm/webm_cluster_parser_unittest.cc21
-rw-r--r--chromium/media/webm/webm_stream_parser.cc43
-rw-r--r--chromium/media/webm/webm_stream_parser.h7
-rw-r--r--chromium/media/webm/webm_tracks_parser.cc21
-rw-r--r--chromium/media/webm/webm_tracks_parser.h11
-rw-r--r--chromium/media/webm/webm_tracks_parser_unittest.cc23
514 files changed, 30057 insertions, 11521 deletions
diff --git a/chromium/media/audio/linux/alsa_input.cc b/chromium/media/audio/alsa/alsa_input.cc
index 929cbe79063..9dcbf2b8662 100644
--- a/chromium/media/audio/linux/alsa_input.cc
+++ b/chromium/media/audio/alsa/alsa_input.cc
@@ -1,19 +1,19 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/audio/linux/alsa_input.h"
+#include "media/audio/alsa/alsa_input.h"
#include "base/basictypes.h"
#include "base/bind.h"
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
#include "base/time/time.h"
+#include "media/audio/alsa/alsa_output.h"
+#include "media/audio/alsa/alsa_util.h"
+#include "media/audio/alsa/alsa_wrapper.h"
+#include "media/audio/alsa/audio_manager_alsa.h"
#include "media/audio/audio_manager.h"
-#include "media/audio/linux/alsa_output.h"
-#include "media/audio/linux/alsa_util.h"
-#include "media/audio/linux/alsa_wrapper.h"
-#include "media/audio/linux/audio_manager_linux.h"
namespace media {
@@ -24,7 +24,7 @@ static const char kDefaultDevice2[] = "plug:default";
const char AlsaPcmInputStream::kAutoSelectDevice[] = "";
-AlsaPcmInputStream::AlsaPcmInputStream(AudioManagerLinux* audio_manager,
+AlsaPcmInputStream::AlsaPcmInputStream(AudioManagerBase* audio_manager,
const std::string& device_name,
const AudioParameters& params,
AlsaWrapper* wrapper)
diff --git a/chromium/media/audio/linux/alsa_input.h b/chromium/media/audio/alsa/alsa_input.h
index 888e4780ac0..6e9aad9056e 100644
--- a/chromium/media/audio/linux/alsa_input.h
+++ b/chromium/media/audio/alsa/alsa_input.h
@@ -1,9 +1,9 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_AUDIO_LINUX_ALSA_INPUT_H_
-#define MEDIA_AUDIO_LINUX_ALSA_INPUT_H_
+#ifndef MEDIA_AUDIO_ALSA_ALSA_INPUT_H_
+#define MEDIA_AUDIO_ALSA_ALSA_INPUT_H_
#include <alsa/asoundlib.h>
@@ -20,7 +20,7 @@
namespace media {
class AlsaWrapper;
-class AudioManagerLinux;
+class AudioManagerBase;
// Provides an input stream for audio capture based on the ALSA PCM interface.
// This object is not thread safe and all methods should be invoked in the
@@ -34,7 +34,7 @@ class AlsaPcmInputStream : public AgcAudioStream<AudioInputStream> {
// Create a PCM Output stream for the ALSA device identified by
// |device_name|. If unsure of what to use for |device_name|, use
// |kAutoSelectDevice|.
- AlsaPcmInputStream(AudioManagerLinux* audio_manager,
+ AlsaPcmInputStream(AudioManagerBase* audio_manager,
const std::string& device_name,
const AudioParameters& params,
AlsaWrapper* wrapper);
@@ -69,7 +69,7 @@ class AlsaPcmInputStream : public AgcAudioStream<AudioInputStream> {
// want circular references. Additionally, stream objects live on the audio
// thread, which is owned by the audio manager and we don't want to addref
// the manager from that thread.
- AudioManagerLinux* audio_manager_;
+ AudioManagerBase* audio_manager_;
std::string device_name_;
AudioParameters params_;
int bytes_per_buffer_;
@@ -89,4 +89,4 @@ class AlsaPcmInputStream : public AgcAudioStream<AudioInputStream> {
} // namespace media
-#endif // MEDIA_AUDIO_LINUX_ALSA_INPUT_H_
+#endif // MEDIA_AUDIO_ALSA_ALSA_INPUT_H_
diff --git a/chromium/media/audio/linux/alsa_output.cc b/chromium/media/audio/alsa/alsa_output.cc
index fa838354b5a..eccf8ee28a8 100644
--- a/chromium/media/audio/linux/alsa_output.cc
+++ b/chromium/media/audio/alsa/alsa_output.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
@@ -32,7 +32,7 @@
// view, it will seem that the device has just clogged and stopped requesting
// data.
-#include "media/audio/linux/alsa_output.h"
+#include "media/audio/alsa/alsa_output.h"
#include <algorithm>
@@ -42,9 +42,9 @@
#include "base/message_loop/message_loop.h"
#include "base/stl_util.h"
#include "base/time/time.h"
-#include "media/audio/linux/alsa_util.h"
-#include "media/audio/linux/alsa_wrapper.h"
-#include "media/audio/linux/audio_manager_linux.h"
+#include "media/audio/alsa/alsa_util.h"
+#include "media/audio/alsa/alsa_wrapper.h"
+#include "media/audio/alsa/audio_manager_alsa.h"
#include "media/base/channel_mixer.h"
#include "media/base/data_buffer.h"
#include "media/base/seekable_buffer.h"
@@ -134,7 +134,7 @@ const uint32 AlsaPcmOutputStream::kMinLatencyMicros = 40 * 1000;
AlsaPcmOutputStream::AlsaPcmOutputStream(const std::string& device_name,
const AudioParameters& params,
AlsaWrapper* wrapper,
- AudioManagerLinux* manager)
+ AudioManagerBase* manager)
: requested_device_name_(device_name),
pcm_format_(alsa_util::BitsToFormat(params.bits_per_sample())),
channels_(params.channels()),
diff --git a/chromium/media/audio/linux/alsa_output.h b/chromium/media/audio/alsa/alsa_output.h
index 841615d9d3f..65a23f75124 100644
--- a/chromium/media/audio/linux/alsa_output.h
+++ b/chromium/media/audio/alsa/alsa_output.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
@@ -18,8 +18,8 @@
// the audio thread. When modifying the code in this class, please read the
// threading assumptions at the top of the implementation.
-#ifndef MEDIA_AUDIO_LINUX_ALSA_OUTPUT_H_
-#define MEDIA_AUDIO_LINUX_ALSA_OUTPUT_H_
+#ifndef MEDIA_AUDIO_ALSA_ALSA_OUTPUT_H_
+#define MEDIA_AUDIO_ALSA_ALSA_OUTPUT_H_
#include <alsa/asoundlib.h>
@@ -40,7 +40,7 @@ class MessageLoop;
namespace media {
class AlsaWrapper;
-class AudioManagerLinux;
+class AudioManagerBase;
class ChannelMixer;
class SeekableBuffer;
@@ -70,7 +70,7 @@ class MEDIA_EXPORT AlsaPcmOutputStream : public AudioOutputStream {
AlsaPcmOutputStream(const std::string& device_name,
const AudioParameters& params,
AlsaWrapper* wrapper,
- AudioManagerLinux* manager);
+ AudioManagerBase* manager);
virtual ~AlsaPcmOutputStream();
@@ -187,7 +187,7 @@ class MEDIA_EXPORT AlsaPcmOutputStream : public AudioOutputStream {
AlsaWrapper* wrapper_;
// Audio manager that created us. Used to report that we've been closed.
- AudioManagerLinux* manager_;
+ AudioManagerBase* manager_;
// Message loop to use for polling. The object is owned by the AudioManager.
// We hold a reference to the audio thread message loop since
@@ -225,4 +225,4 @@ MEDIA_EXPORT std::ostream& operator<<(std::ostream& os,
}; // namespace media
-#endif // MEDIA_AUDIO_LINUX_ALSA_OUTPUT_H_
+#endif // MEDIA_AUDIO_ALSA_ALSA_OUTPUT_H_
diff --git a/chromium/media/audio/linux/alsa_output_unittest.cc b/chromium/media/audio/alsa/alsa_output_unittest.cc
index 82fbab94c19..99ae8b02e0a 100644
--- a/chromium/media/audio/linux/alsa_output_unittest.cc
+++ b/chromium/media/audio/alsa/alsa_output_unittest.cc
@@ -1,12 +1,13 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/message_loop/message_loop.h"
#include "base/strings/stringprintf.h"
-#include "media/audio/linux/alsa_output.h"
-#include "media/audio/linux/alsa_wrapper.h"
-#include "media/audio/linux/audio_manager_linux.h"
+#include "media/audio/alsa/alsa_output.h"
+#include "media/audio/alsa/alsa_wrapper.h"
+#include "media/audio/alsa/audio_manager_alsa.h"
+#include "media/audio/fake_audio_log_factory.h"
#include "media/base/data_buffer.h"
#include "media/base/seekable_buffer.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -76,8 +77,9 @@ class MockAudioSourceCallback : public AudioOutputStream::AudioSourceCallback {
MOCK_METHOD1(OnError, void(AudioOutputStream* stream));
};
-class MockAudioManagerLinux : public AudioManagerLinux {
+class MockAudioManagerAlsa : public AudioManagerAlsa {
public:
+ MockAudioManagerAlsa() : AudioManagerAlsa(&fake_audio_log_factory_) {}
MOCK_METHOD0(Init, void());
MOCK_METHOD0(HasAudioOutputDevices, bool());
MOCK_METHOD0(HasAudioInputDevices, bool());
@@ -104,12 +106,15 @@ class MockAudioManagerLinux : public AudioManagerLinux {
virtual scoped_refptr<base::MessageLoopProxy> GetMessageLoop() OVERRIDE {
return base::MessageLoop::current()->message_loop_proxy();
}
+
+ private:
+ FakeAudioLogFactory fake_audio_log_factory_;
};
class AlsaPcmOutputStreamTest : public testing::Test {
protected:
AlsaPcmOutputStreamTest() {
- mock_manager_.reset(new StrictMock<MockAudioManagerLinux>());
+ mock_manager_.reset(new StrictMock<MockAudioManagerAlsa>());
}
virtual ~AlsaPcmOutputStreamTest() {
@@ -171,7 +176,7 @@ class AlsaPcmOutputStreamTest : public testing::Test {
static void* kFakeHints[];
StrictMock<MockAlsaWrapper> mock_alsa_wrapper_;
- scoped_ptr<StrictMock<MockAudioManagerLinux> > mock_manager_;
+ scoped_ptr<StrictMock<MockAudioManagerAlsa> > mock_manager_;
base::MessageLoop message_loop_;
scoped_refptr<media::DataBuffer> packet_;
diff --git a/chromium/media/audio/linux/alsa_util.cc b/chromium/media/audio/alsa/alsa_util.cc
index 176ef697741..f26cbd30f2f 100644
--- a/chromium/media/audio/linux/alsa_util.cc
+++ b/chromium/media/audio/alsa/alsa_util.cc
@@ -1,13 +1,13 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/audio/linux/alsa_util.h"
+#include "media/audio/alsa/alsa_util.h"
#include <string>
#include "base/logging.h"
-#include "media/audio/linux/alsa_wrapper.h"
+#include "media/audio/alsa/alsa_wrapper.h"
namespace alsa_util {
diff --git a/chromium/media/audio/linux/alsa_util.h b/chromium/media/audio/alsa/alsa_util.h
index 53cf80af84c..a23ab317dd7 100644
--- a/chromium/media/audio/linux/alsa_util.h
+++ b/chromium/media/audio/alsa/alsa_util.h
@@ -1,9 +1,9 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_AUDIO_LINUX_ALSA_UTIL_H_
-#define MEDIA_AUDIO_LINUX_ALSA_UTIL_H_
+#ifndef MEDIA_AUDIO_ALSA_ALSA_UTIL_H_
+#define MEDIA_AUDIO_ALSA_ALSA_UTIL_H_
#include <alsa/asoundlib.h>
#include <string>
@@ -44,4 +44,4 @@ snd_mixer_elem_t* LoadCaptureMixerElement(media::AlsaWrapper* wrapper,
} // namespace alsa_util
-#endif // MEDIA_AUDIO_LINUX_ALSA_UTIL_H_
+#endif // MEDIA_AUDIO_ALSA_ALSA_UTIL_H_
diff --git a/chromium/media/audio/linux/alsa_wrapper.cc b/chromium/media/audio/alsa/alsa_wrapper.cc
index c1ce359b303..969f3c499f4 100644
--- a/chromium/media/audio/linux/alsa_wrapper.cc
+++ b/chromium/media/audio/alsa/alsa_wrapper.cc
@@ -1,8 +1,8 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/audio/linux/alsa_wrapper.h"
+#include "media/audio/alsa/alsa_wrapper.h"
#include <alsa/asoundlib.h>
diff --git a/chromium/media/audio/linux/alsa_wrapper.h b/chromium/media/audio/alsa/alsa_wrapper.h
index 30d94635dd5..4b3c295e582 100644
--- a/chromium/media/audio/linux/alsa_wrapper.h
+++ b/chromium/media/audio/alsa/alsa_wrapper.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
@@ -6,6 +6,9 @@
// we want to use. It's purpose is to allow injection of a mock so that the
// higher level code is testable.
+#ifndef MEDIA_AUDIO_ALSA_ALSA_WRAPPER_H_
+#define MEDIA_AUDIO_ALSA_ALSA_WRAPPER_H_
+
#include <alsa/asoundlib.h>
#include "base/basictypes.h"
@@ -79,3 +82,5 @@ class MEDIA_EXPORT AlsaWrapper {
};
} // namespace media
+
+#endif // MEDIA_AUDIO_ALSA_ALSA_WRAPPER_H_
diff --git a/chromium/media/audio/alsa/audio_manager_alsa.cc b/chromium/media/audio/alsa/audio_manager_alsa.cc
new file mode 100644
index 00000000000..ac61a5fa974
--- /dev/null
+++ b/chromium/media/audio/alsa/audio_manager_alsa.cc
@@ -0,0 +1,362 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/alsa/audio_manager_alsa.h"
+
+#include "base/command_line.h"
+#include "base/environment.h"
+#include "base/files/file_path.h"
+#include "base/logging.h"
+#include "base/metrics/histogram.h"
+#include "base/nix/xdg_util.h"
+#include "base/process/launch.h"
+#include "base/stl_util.h"
+#include "media/audio/audio_output_dispatcher.h"
+#include "media/audio/audio_parameters.h"
+#if defined(USE_CRAS)
+#include "media/audio/cras/audio_manager_cras.h"
+#endif
+#include "media/audio/alsa/alsa_input.h"
+#include "media/audio/alsa/alsa_output.h"
+#include "media/audio/alsa/alsa_wrapper.h"
+#if defined(USE_PULSEAUDIO)
+#include "media/audio/pulse/audio_manager_pulse.h"
+#endif
+#include "media/base/channel_layout.h"
+#include "media/base/limits.h"
+#include "media/base/media_switches.h"
+
+namespace media {
+
+// Maximum number of output streams that can be open simultaneously.
+static const int kMaxOutputStreams = 50;
+
+// Default sample rate for input and output streams.
+static const int kDefaultSampleRate = 48000;
+
+// Since "default", "pulse" and "dmix" devices are virtual devices mapped to
+// real devices, we remove them from the list to avoiding duplicate counting.
+// In addition, note that we support no more than 2 channels for recording,
+// hence surround devices are not stored in the list.
+static const char* kInvalidAudioInputDevices[] = {
+ "default",
+ "dmix",
+ "null",
+ "pulse",
+ "surround",
+};
+
+// static
+void AudioManagerAlsa::ShowLinuxAudioInputSettings() {
+ scoped_ptr<base::Environment> env(base::Environment::Create());
+ CommandLine command_line(CommandLine::NO_PROGRAM);
+ switch (base::nix::GetDesktopEnvironment(env.get())) {
+ case base::nix::DESKTOP_ENVIRONMENT_GNOME:
+ command_line.SetProgram(base::FilePath("gnome-volume-control"));
+ break;
+ case base::nix::DESKTOP_ENVIRONMENT_KDE3:
+ case base::nix::DESKTOP_ENVIRONMENT_KDE4:
+ command_line.SetProgram(base::FilePath("kmix"));
+ break;
+ case base::nix::DESKTOP_ENVIRONMENT_UNITY:
+ command_line.SetProgram(base::FilePath("gnome-control-center"));
+ command_line.AppendArg("sound");
+ command_line.AppendArg("input");
+ break;
+ default:
+ LOG(ERROR) << "Failed to show audio input settings: we don't know "
+ << "what command to use for your desktop environment.";
+ return;
+ }
+ base::LaunchProcess(command_line, base::LaunchOptions(), NULL);
+}
+
+// Implementation of AudioManager.
+bool AudioManagerAlsa::HasAudioOutputDevices() {
+ return HasAnyAlsaAudioDevice(kStreamPlayback);
+}
+
+bool AudioManagerAlsa::HasAudioInputDevices() {
+ return HasAnyAlsaAudioDevice(kStreamCapture);
+}
+
+AudioManagerAlsa::AudioManagerAlsa(AudioLogFactory* audio_log_factory)
+ : AudioManagerBase(audio_log_factory),
+ wrapper_(new AlsaWrapper()) {
+ SetMaxOutputStreamsAllowed(kMaxOutputStreams);
+}
+
+AudioManagerAlsa::~AudioManagerAlsa() {
+ Shutdown();
+}
+
+void AudioManagerAlsa::ShowAudioInputSettings() {
+ ShowLinuxAudioInputSettings();
+}
+
+void AudioManagerAlsa::GetAudioInputDeviceNames(
+ AudioDeviceNames* device_names) {
+ DCHECK(device_names->empty());
+ GetAlsaAudioDevices(kStreamCapture, device_names);
+}
+
+void AudioManagerAlsa::GetAudioOutputDeviceNames(
+ AudioDeviceNames* device_names) {
+ DCHECK(device_names->empty());
+ GetAlsaAudioDevices(kStreamPlayback, device_names);
+}
+
+AudioParameters AudioManagerAlsa::GetInputStreamParameters(
+ const std::string& device_id) {
+ static const int kDefaultInputBufferSize = 1024;
+
+ return AudioParameters(
+ AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_STEREO,
+ kDefaultSampleRate, 16, kDefaultInputBufferSize);
+}
+
+void AudioManagerAlsa::GetAlsaAudioDevices(
+ StreamType type,
+ media::AudioDeviceNames* device_names) {
+ // Constants specified by the ALSA API for device hints.
+ static const char kPcmInterfaceName[] = "pcm";
+ int card = -1;
+
+ // Loop through the sound cards to get ALSA device hints.
+ while (!wrapper_->CardNext(&card) && card >= 0) {
+ void** hints = NULL;
+ int error = wrapper_->DeviceNameHint(card, kPcmInterfaceName, &hints);
+ if (!error) {
+ GetAlsaDevicesInfo(type, hints, device_names);
+
+ // Destroy the hints now that we're done with it.
+ wrapper_->DeviceNameFreeHint(hints);
+ } else {
+ DLOG(WARNING) << "GetAlsaAudioDevices: unable to get device hints: "
+ << wrapper_->StrError(error);
+ }
+ }
+}
+
+void AudioManagerAlsa::GetAlsaDevicesInfo(
+ AudioManagerAlsa::StreamType type,
+ void** hints,
+ media::AudioDeviceNames* device_names) {
+ static const char kIoHintName[] = "IOID";
+ static const char kNameHintName[] = "NAME";
+ static const char kDescriptionHintName[] = "DESC";
+
+ const char* unwanted_device_type = UnwantedDeviceTypeWhenEnumerating(type);
+
+ for (void** hint_iter = hints; *hint_iter != NULL; hint_iter++) {
+ // Only examine devices of the right type. Valid values are
+ // "Input", "Output", and NULL which means both input and output.
+ scoped_ptr_malloc<char> io(wrapper_->DeviceNameGetHint(*hint_iter,
+ kIoHintName));
+ if (io != NULL && strcmp(unwanted_device_type, io.get()) == 0)
+ continue;
+
+ // Found a device, prepend the default device since we always want
+ // it to be on the top of the list for all platforms. And there is
+ // no duplicate counting here since it is only done if the list is
+ // still empty. Note, pulse has exclusively opened the default
+ // device, so we must open the device via the "default" moniker.
+ if (device_names->empty()) {
+ device_names->push_front(media::AudioDeviceName(
+ AudioManagerBase::kDefaultDeviceName,
+ AudioManagerBase::kDefaultDeviceId));
+ }
+
+ // Get the unique device name for the device.
+ scoped_ptr_malloc<char> unique_device_name(
+ wrapper_->DeviceNameGetHint(*hint_iter, kNameHintName));
+
+ // Find out if the device is available.
+ if (IsAlsaDeviceAvailable(type, unique_device_name.get())) {
+ // Get the description for the device.
+ scoped_ptr_malloc<char> desc(wrapper_->DeviceNameGetHint(
+ *hint_iter, kDescriptionHintName));
+
+ media::AudioDeviceName name;
+ name.unique_id = unique_device_name.get();
+ if (desc) {
+ // Use the more user friendly description as name.
+ // Replace '\n' with '-'.
+ char* pret = strchr(desc.get(), '\n');
+ if (pret)
+ *pret = '-';
+ name.device_name = desc.get();
+ } else {
+ // Virtual devices don't necessarily have descriptions.
+ // Use their names instead.
+ name.device_name = unique_device_name.get();
+ }
+
+ // Store the device information.
+ device_names->push_back(name);
+ }
+ }
+}
+
+// static
+bool AudioManagerAlsa::IsAlsaDeviceAvailable(
+ AudioManagerAlsa::StreamType type,
+ const char* device_name) {
+ if (!device_name)
+ return false;
+
+ // We do prefix matches on the device name to see whether to include
+ // it or not.
+ if (type == kStreamCapture) {
+ // Check if the device is in the list of invalid devices.
+ for (size_t i = 0; i < arraysize(kInvalidAudioInputDevices); ++i) {
+ if (strncmp(kInvalidAudioInputDevices[i], device_name,
+ strlen(kInvalidAudioInputDevices[i])) == 0)
+ return false;
+ }
+ return true;
+ } else {
+ DCHECK_EQ(kStreamPlayback, type);
+ // We prefer the device type that maps straight to hardware but
+ // goes through software conversion if needed (e.g. incompatible
+ // sample rate).
+ // TODO(joi): Should we prefer "hw" instead?
+ static const char kDeviceTypeDesired[] = "plughw";
+ return strncmp(kDeviceTypeDesired,
+ device_name,
+ arraysize(kDeviceTypeDesired) - 1) == 0;
+ }
+}
+
+// static
+const char* AudioManagerAlsa::UnwantedDeviceTypeWhenEnumerating(
+ AudioManagerAlsa::StreamType wanted_type) {
+ return wanted_type == kStreamPlayback ? "Input" : "Output";
+}
+
+bool AudioManagerAlsa::HasAnyAlsaAudioDevice(
+ AudioManagerAlsa::StreamType stream) {
+ static const char kPcmInterfaceName[] = "pcm";
+ static const char kIoHintName[] = "IOID";
+ void** hints = NULL;
+ bool has_device = false;
+ int card = -1;
+
+ // Loop through the sound cards.
+ // Don't use snd_device_name_hint(-1,..) since there is a access violation
+ // inside this ALSA API with libasound.so.2.0.0.
+ while (!wrapper_->CardNext(&card) && (card >= 0) && !has_device) {
+ int error = wrapper_->DeviceNameHint(card, kPcmInterfaceName, &hints);
+ if (!error) {
+ for (void** hint_iter = hints; *hint_iter != NULL; hint_iter++) {
+ // Only examine devices that are |stream| capable. Valid values are
+ // "Input", "Output", and NULL which means both input and output.
+ scoped_ptr_malloc<char> io(wrapper_->DeviceNameGetHint(*hint_iter,
+ kIoHintName));
+ const char* unwanted_type = UnwantedDeviceTypeWhenEnumerating(stream);
+ if (io != NULL && strcmp(unwanted_type, io.get()) == 0)
+ continue; // Wrong type, skip the device.
+
+ // Found an input device.
+ has_device = true;
+ break;
+ }
+
+ // Destroy the hints now that we're done with it.
+ wrapper_->DeviceNameFreeHint(hints);
+ hints = NULL;
+ } else {
+ DLOG(WARNING) << "HasAnyAudioDevice: unable to get device hints: "
+ << wrapper_->StrError(error);
+ }
+ }
+
+ return has_device;
+}
+
+AudioOutputStream* AudioManagerAlsa::MakeLinearOutputStream(
+ const AudioParameters& params) {
+ DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format());
+ return MakeOutputStream(params);
+}
+
+AudioOutputStream* AudioManagerAlsa::MakeLowLatencyOutputStream(
+ const AudioParameters& params,
+ const std::string& device_id,
+ const std::string& input_device_id) {
+ DLOG_IF(ERROR, !device_id.empty()) << "Not implemented!";
+ DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
+ // TODO(xians): Use input_device_id for unified IO.
+ return MakeOutputStream(params);
+}
+
+AudioInputStream* AudioManagerAlsa::MakeLinearInputStream(
+ const AudioParameters& params, const std::string& device_id) {
+ DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format());
+ return MakeInputStream(params, device_id);
+}
+
+AudioInputStream* AudioManagerAlsa::MakeLowLatencyInputStream(
+ const AudioParameters& params, const std::string& device_id) {
+ DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
+ return MakeInputStream(params, device_id);
+}
+
+AudioParameters AudioManagerAlsa::GetPreferredOutputStreamParameters(
+ const std::string& output_device_id,
+ const AudioParameters& input_params) {
+ // TODO(tommi): Support |output_device_id|.
+ DLOG_IF(ERROR, !output_device_id.empty()) << "Not implemented!";
+ static const int kDefaultOutputBufferSize = 2048;
+ ChannelLayout channel_layout = CHANNEL_LAYOUT_STEREO;
+ int sample_rate = kDefaultSampleRate;
+ int buffer_size = kDefaultOutputBufferSize;
+ int bits_per_sample = 16;
+ int input_channels = 0;
+ if (input_params.IsValid()) {
+ // Some clients, such as WebRTC, have a more limited use case and work
+ // acceptably with a smaller buffer size. The check below allows clients
+ // which want to try a smaller buffer size on Linux to do so.
+ // TODO(dalecurtis): This should include bits per channel and channel layout
+ // eventually.
+ sample_rate = input_params.sample_rate();
+ bits_per_sample = input_params.bits_per_sample();
+ channel_layout = input_params.channel_layout();
+ input_channels = input_params.input_channels();
+ buffer_size = std::min(input_params.frames_per_buffer(), buffer_size);
+ }
+
+ int user_buffer_size = GetUserBufferSize();
+ if (user_buffer_size)
+ buffer_size = user_buffer_size;
+
+ return AudioParameters(
+ AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout, input_channels,
+ sample_rate, bits_per_sample, buffer_size, AudioParameters::NO_EFFECTS);
+}
+
+AudioOutputStream* AudioManagerAlsa::MakeOutputStream(
+ const AudioParameters& params) {
+ std::string device_name = AlsaPcmOutputStream::kAutoSelectDevice;
+ if (CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kAlsaOutputDevice)) {
+ device_name = CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
+ switches::kAlsaOutputDevice);
+ }
+ return new AlsaPcmOutputStream(device_name, params, wrapper_.get(), this);
+}
+
+AudioInputStream* AudioManagerAlsa::MakeInputStream(
+ const AudioParameters& params, const std::string& device_id) {
+ std::string device_name = (device_id == AudioManagerBase::kDefaultDeviceId) ?
+ AlsaPcmInputStream::kAutoSelectDevice : device_id;
+ if (CommandLine::ForCurrentProcess()->HasSwitch(switches::kAlsaInputDevice)) {
+ device_name = CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
+ switches::kAlsaInputDevice);
+ }
+
+ return new AlsaPcmInputStream(this, device_name, params, wrapper_.get());
+}
+
+} // namespace media
diff --git a/chromium/media/audio/linux/audio_manager_linux.h b/chromium/media/audio/alsa/audio_manager_alsa.h
index ab284dfdce9..155089f06bc 100644
--- a/chromium/media/audio/linux/audio_manager_linux.h
+++ b/chromium/media/audio/alsa/audio_manager_alsa.h
@@ -1,9 +1,9 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_AUDIO_LINUX_AUDIO_MANAGER_LINUX_H_
-#define MEDIA_AUDIO_LINUX_AUDIO_MANAGER_LINUX_H_
+#ifndef MEDIA_AUDIO_ALSA_AUDIO_MANAGER_ALSA_H_
+#define MEDIA_AUDIO_ALSA_AUDIO_MANAGER_ALSA_H_
#include <string>
#include "base/compiler_specific.h"
@@ -15,9 +15,9 @@ namespace media {
class AlsaWrapper;
-class MEDIA_EXPORT AudioManagerLinux : public AudioManagerBase {
+class MEDIA_EXPORT AudioManagerAlsa : public AudioManagerBase {
public:
- AudioManagerLinux();
+ AudioManagerAlsa(AudioLogFactory* audio_log_factory);
static void ShowLinuxAudioInputSettings();
@@ -45,7 +45,7 @@ class MEDIA_EXPORT AudioManagerLinux : public AudioManagerBase {
const AudioParameters& params, const std::string& device_id) OVERRIDE;
protected:
- virtual ~AudioManagerLinux();
+ virtual ~AudioManagerAlsa();
virtual AudioParameters GetPreferredOutputStreamParameters(
const std::string& output_device_id,
@@ -86,9 +86,9 @@ class MEDIA_EXPORT AudioManagerLinux : public AudioManagerBase {
scoped_ptr<AlsaWrapper> wrapper_;
- DISALLOW_COPY_AND_ASSIGN(AudioManagerLinux);
+ DISALLOW_COPY_AND_ASSIGN(AudioManagerAlsa);
};
} // namespace media
-#endif // MEDIA_AUDIO_LINUX_AUDIO_MANAGER_LINUX_H_
+#endif // MEDIA_AUDIO_ALSA_AUDIO_MANAGER_ALSA_H_
diff --git a/chromium/media/audio/android/audio_android_unittest.cc b/chromium/media/audio/android/audio_android_unittest.cc
index a8e448f821f..e7913265269 100644
--- a/chromium/media/audio/android/audio_android_unittest.cc
+++ b/chromium/media/audio/android/audio_android_unittest.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "base/android/build_info.h"
#include "base/basictypes.h"
#include "base/file_util.h"
#include "base/memory/scoped_ptr.h"
@@ -95,7 +96,9 @@ std::ostream& operator<<(std::ostream& os, const AudioParameters& params) {
<< "bytes per buffer: " << params.GetBytesPerBuffer() << endl
<< "bytes per second: " << params.GetBytesPerSecond() << endl
<< "bytes per frame: " << params.GetBytesPerFrame() << endl
- << "frame size in ms: " << ExpectedTimeBetweenCallbacks(params);
+ << "chunk size in ms: " << ExpectedTimeBetweenCallbacks(params) << endl
+ << "echo_canceller: "
+ << (params.effects() & AudioParameters::ECHO_CANCELLER);
return os;
}
@@ -142,7 +145,7 @@ class FileAudioSource : public AudioOutputStream::AudioSourceCallback {
// Log the name of the file which is used as input for this test.
base::FilePath file_path = GetTestDataFilePath(name);
- LOG(INFO) << "Reading from file: " << file_path.value().c_str();
+ VLOG(0) << "Reading from file: " << file_path.value().c_str();
}
virtual ~FileAudioSource() {}
@@ -218,9 +221,9 @@ class FileAudioSink : public AudioInputStream::AudioInputCallback {
base::FilePath file_path;
EXPECT_TRUE(PathService::Get(base::DIR_SOURCE_ROOT, &file_path));
file_path = file_path.AppendASCII(file_name.c_str());
- binary_file_ = file_util::OpenFile(file_path, "wb");
+ binary_file_ = base::OpenFile(file_path, "wb");
DLOG_IF(ERROR, !binary_file_) << "Failed to open binary PCM data file.";
- LOG(INFO) << "Writing to file: " << file_path.value().c_str();
+ VLOG(0) << "Writing to file: " << file_path.value().c_str();
}
virtual ~FileAudioSink() {
@@ -239,7 +242,7 @@ class FileAudioSink : public AudioInputStream::AudioInputCallback {
buffer_->Seek(chunk_size);
bytes_written += chunk_size;
}
- file_util::CloseFile(binary_file_);
+ base::CloseFile(binary_file_);
}
// AudioInputStream::AudioInputCallback implementation.
@@ -383,14 +386,14 @@ class FullDuplexAudioSinkSource
DISALLOW_COPY_AND_ASSIGN(FullDuplexAudioSinkSource);
};
-// Test fixture class.
-class AudioAndroidTest : public testing::Test {
+// Test fixture class for tests which only exercise the output path.
+class AudioAndroidOutputTest : public testing::Test {
public:
- AudioAndroidTest() {}
+ AudioAndroidOutputTest() {}
protected:
virtual void SetUp() {
- audio_manager_.reset(AudioManager::Create());
+ audio_manager_.reset(AudioManager::CreateForTesting());
loop_.reset(new base::MessageLoopForUI());
}
@@ -399,11 +402,6 @@ class AudioAndroidTest : public testing::Test {
AudioManager* audio_manager() { return audio_manager_.get(); }
base::MessageLoopForUI* loop() { return loop_.get(); }
- AudioParameters GetDefaultInputStreamParameters() {
- return audio_manager()->GetInputStreamParameters(
- AudioManagerBase::kDefaultDeviceId);
- }
-
AudioParameters GetDefaultOutputStreamParameters() {
return audio_manager()->GetDefaultOutputStreamParameters();
}
@@ -413,28 +411,28 @@ class AudioAndroidTest : public testing::Test {
.InMillisecondsF();
}
- void StartInputStreamCallbacks(const AudioParameters& params) {
+ void StartOutputStreamCallbacks(const AudioParameters& params) {
double expected_time_between_callbacks_ms =
ExpectedTimeBetweenCallbacks(params);
const int num_callbacks =
(kCallbackTestTimeMs / expected_time_between_callbacks_ms);
- AudioInputStream* stream = audio_manager()->MakeAudioInputStream(
- params, AudioManagerBase::kDefaultDeviceId);
+ AudioOutputStream* stream = audio_manager()->MakeAudioOutputStream(
+ params, std::string(), std::string());
EXPECT_TRUE(stream);
int count = 0;
- MockAudioInputCallback sink;
+ MockAudioOutputCallback source;
- EXPECT_CALL(sink,
- OnData(stream, NotNull(), params.GetBytesPerBuffer(), _, _))
+ EXPECT_CALL(source, OnMoreData(NotNull(), _))
.Times(AtLeast(num_callbacks))
.WillRepeatedly(
- CheckCountAndPostQuitTask(&count, num_callbacks, loop()));
- EXPECT_CALL(sink, OnError(stream)).Times(0);
- EXPECT_CALL(sink, OnClose(stream)).Times(1);
+ DoAll(CheckCountAndPostQuitTask(&count, num_callbacks, loop()),
+ Invoke(&source, &MockAudioOutputCallback::RealOnMoreData)));
+ EXPECT_CALL(source, OnError(stream)).Times(0);
+ EXPECT_CALL(source, OnMoreIOData(_, _, _)).Times(0);
EXPECT_TRUE(stream->Open());
- stream->Start(&sink);
+ stream->Start(&source);
start_time_ = base::TimeTicks::Now();
loop()->Run();
end_time_ = base::TimeTicks::Now();
@@ -443,38 +441,83 @@ class AudioAndroidTest : public testing::Test {
double average_time_between_callbacks_ms =
AverageTimeBetweenCallbacks(num_callbacks);
- LOG(INFO) << "expected time between callbacks: "
- << expected_time_between_callbacks_ms << " ms";
- LOG(INFO) << "average time between callbacks: "
- << average_time_between_callbacks_ms << " ms";
+ VLOG(0) << "expected time between callbacks: "
+ << expected_time_between_callbacks_ms << " ms";
+ VLOG(0) << "average time between callbacks: "
+ << average_time_between_callbacks_ms << " ms";
EXPECT_GE(average_time_between_callbacks_ms,
0.70 * expected_time_between_callbacks_ms);
EXPECT_LE(average_time_between_callbacks_ms,
1.30 * expected_time_between_callbacks_ms);
}
- void StartOutputStreamCallbacks(const AudioParameters& params) {
+ scoped_ptr<base::MessageLoopForUI> loop_;
+ scoped_ptr<AudioManager> audio_manager_;
+ base::TimeTicks start_time_;
+ base::TimeTicks end_time_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(AudioAndroidOutputTest);
+};
+
+// AudioRecordInputStream should only be created on Jelly Bean and higher. This
+// ensures we only test against the AudioRecord path when that is satisfied.
+std::vector<bool> RunAudioRecordInputPathTests() {
+ std::vector<bool> tests;
+ tests.push_back(false);
+ if (base::android::BuildInfo::GetInstance()->sdk_int() >= 16)
+ tests.push_back(true);
+ return tests;
+}
+
+// Test fixture class for tests which exercise the input path, or both input and
+// output paths. It is value-parameterized to test against both the Java
+// AudioRecord (when true) and native OpenSLES (when false) input paths.
+class AudioAndroidInputTest : public AudioAndroidOutputTest,
+ public testing::WithParamInterface<bool> {
+ public:
+ AudioAndroidInputTest() {}
+
+ protected:
+ AudioParameters GetInputStreamParameters() {
+ AudioParameters input_params = audio_manager()->GetInputStreamParameters(
+ AudioManagerBase::kDefaultDeviceId);
+ // Override the platform effects setting to use the AudioRecord or OpenSLES
+ // path as requested.
+ int effects = GetParam() ? AudioParameters::ECHO_CANCELLER :
+ AudioParameters::NO_EFFECTS;
+ AudioParameters params(input_params.format(),
+ input_params.channel_layout(),
+ input_params.input_channels(),
+ input_params.sample_rate(),
+ input_params.bits_per_sample(),
+ input_params.frames_per_buffer(),
+ effects);
+ return params;
+ }
+
+ void StartInputStreamCallbacks(const AudioParameters& params) {
double expected_time_between_callbacks_ms =
ExpectedTimeBetweenCallbacks(params);
const int num_callbacks =
(kCallbackTestTimeMs / expected_time_between_callbacks_ms);
- AudioOutputStream* stream = audio_manager()->MakeAudioOutputStream(
- params, std::string(), std::string());
+ AudioInputStream* stream = audio_manager()->MakeAudioInputStream(
+ params, AudioManagerBase::kDefaultDeviceId);
EXPECT_TRUE(stream);
int count = 0;
- MockAudioOutputCallback source;
+ MockAudioInputCallback sink;
- EXPECT_CALL(source, OnMoreData(NotNull(), _))
+ EXPECT_CALL(sink,
+ OnData(stream, NotNull(), params.GetBytesPerBuffer(), _, _))
.Times(AtLeast(num_callbacks))
.WillRepeatedly(
- DoAll(CheckCountAndPostQuitTask(&count, num_callbacks, loop()),
- Invoke(&source, &MockAudioOutputCallback::RealOnMoreData)));
- EXPECT_CALL(source, OnError(stream)).Times(0);
- EXPECT_CALL(source, OnMoreIOData(_, _, _)).Times(0);
+ CheckCountAndPostQuitTask(&count, num_callbacks, loop()));
+ EXPECT_CALL(sink, OnError(stream)).Times(0);
+ EXPECT_CALL(sink, OnClose(stream)).Times(1);
EXPECT_TRUE(stream->Open());
- stream->Start(&source);
+ stream->Start(&sink);
start_time_ = base::TimeTicks::Now();
loop()->Run();
end_time_ = base::TimeTicks::Now();
@@ -483,50 +526,50 @@ class AudioAndroidTest : public testing::Test {
double average_time_between_callbacks_ms =
AverageTimeBetweenCallbacks(num_callbacks);
- LOG(INFO) << "expected time between callbacks: "
- << expected_time_between_callbacks_ms << " ms";
- LOG(INFO) << "average time between callbacks: "
- << average_time_between_callbacks_ms << " ms";
+ VLOG(0) << "expected time between callbacks: "
+ << expected_time_between_callbacks_ms << " ms";
+ VLOG(0) << "average time between callbacks: "
+ << average_time_between_callbacks_ms << " ms";
EXPECT_GE(average_time_between_callbacks_ms,
0.70 * expected_time_between_callbacks_ms);
EXPECT_LE(average_time_between_callbacks_ms,
1.30 * expected_time_between_callbacks_ms);
}
- scoped_ptr<base::MessageLoopForUI> loop_;
- scoped_ptr<AudioManager> audio_manager_;
- base::TimeTicks start_time_;
- base::TimeTicks end_time_;
- DISALLOW_COPY_AND_ASSIGN(AudioAndroidTest);
+ private:
+ DISALLOW_COPY_AND_ASSIGN(AudioAndroidInputTest);
};
// Get the default audio input parameters and log the result.
-TEST_F(AudioAndroidTest, GetInputStreamParameters) {
- AudioParameters params = GetDefaultInputStreamParameters();
+TEST_P(AudioAndroidInputTest, GetDefaultInputStreamParameters) {
+ // We don't go through AudioAndroidInputTest::GetInputStreamParameters() here
+ // so that we can log the real (non-overridden) values of the effects.
+ AudioParameters params = audio_manager()->GetInputStreamParameters(
+ AudioManagerBase::kDefaultDeviceId);
EXPECT_TRUE(params.IsValid());
VLOG(1) << params;
}
// Get the default audio output parameters and log the result.
-TEST_F(AudioAndroidTest, GetDefaultOutputStreamParameters) {
+TEST_F(AudioAndroidOutputTest, GetDefaultOutputStreamParameters) {
AudioParameters params = GetDefaultOutputStreamParameters();
EXPECT_TRUE(params.IsValid());
VLOG(1) << params;
}
// Check if low-latency output is supported and log the result as output.
-TEST_F(AudioAndroidTest, IsAudioLowLatencySupported) {
+TEST_F(AudioAndroidOutputTest, IsAudioLowLatencySupported) {
AudioManagerAndroid* manager =
static_cast<AudioManagerAndroid*>(audio_manager());
bool low_latency = manager->IsAudioLowLatencySupported();
- low_latency ? LOG(INFO) << "Low latency output is supported"
- : LOG(INFO) << "Low latency output is *not* supported";
+ low_latency ? VLOG(0) << "Low latency output is supported"
+ : VLOG(0) << "Low latency output is *not* supported";
}
// Ensure that a default input stream can be created and closed.
-TEST_F(AudioAndroidTest, CreateAndCloseInputStream) {
- AudioParameters params = GetDefaultInputStreamParameters();
+TEST_P(AudioAndroidInputTest, CreateAndCloseInputStream) {
+ AudioParameters params = GetInputStreamParameters();
AudioInputStream* ais = audio_manager()->MakeAudioInputStream(
params, AudioManagerBase::kDefaultDeviceId);
EXPECT_TRUE(ais);
@@ -537,7 +580,7 @@ TEST_F(AudioAndroidTest, CreateAndCloseInputStream) {
// TODO(henrika): should we also verify that this API changes the audio mode
// to communication mode, and calls RegisterHeadsetReceiver, the first time
// it is called?
-TEST_F(AudioAndroidTest, CreateAndCloseOutputStream) {
+TEST_F(AudioAndroidOutputTest, CreateAndCloseOutputStream) {
AudioParameters params = GetDefaultOutputStreamParameters();
AudioOutputStream* aos = audio_manager()->MakeAudioOutputStream(
params, std::string(), std::string());
@@ -546,8 +589,8 @@ TEST_F(AudioAndroidTest, CreateAndCloseOutputStream) {
}
// Ensure that a default input stream can be opened and closed.
-TEST_F(AudioAndroidTest, OpenAndCloseInputStream) {
- AudioParameters params = GetDefaultInputStreamParameters();
+TEST_P(AudioAndroidInputTest, OpenAndCloseInputStream) {
+ AudioParameters params = GetInputStreamParameters();
AudioInputStream* ais = audio_manager()->MakeAudioInputStream(
params, AudioManagerBase::kDefaultDeviceId);
EXPECT_TRUE(ais);
@@ -556,7 +599,7 @@ TEST_F(AudioAndroidTest, OpenAndCloseInputStream) {
}
// Ensure that a default output stream can be opened and closed.
-TEST_F(AudioAndroidTest, OpenAndCloseOutputStream) {
+TEST_F(AudioAndroidOutputTest, OpenAndCloseOutputStream) {
AudioParameters params = GetDefaultOutputStreamParameters();
AudioOutputStream* aos = audio_manager()->MakeAudioOutputStream(
params, std::string(), std::string());
@@ -567,8 +610,8 @@ TEST_F(AudioAndroidTest, OpenAndCloseOutputStream) {
// Start input streaming using default input parameters and ensure that the
// callback sequence is sane.
-TEST_F(AudioAndroidTest, StartInputStreamCallbacks) {
- AudioParameters params = GetDefaultInputStreamParameters();
+TEST_P(AudioAndroidInputTest, StartInputStreamCallbacks) {
+ AudioParameters params = GetInputStreamParameters();
StartInputStreamCallbacks(params);
}
@@ -576,19 +619,21 @@ TEST_F(AudioAndroidTest, StartInputStreamCallbacks) {
// callback sequence is sane. The only change we make in this test is to select
// a 10ms buffer size instead of the default size.
// TODO(henrika): possibly add support for more variations.
-TEST_F(AudioAndroidTest, StartInputStreamCallbacksNonDefaultParameters) {
- AudioParameters native_params = GetDefaultInputStreamParameters();
+TEST_P(AudioAndroidInputTest, StartInputStreamCallbacksNonDefaultParameters) {
+ AudioParameters native_params = GetInputStreamParameters();
AudioParameters params(native_params.format(),
native_params.channel_layout(),
+ native_params.input_channels(),
native_params.sample_rate(),
native_params.bits_per_sample(),
- native_params.sample_rate() / 100);
+ native_params.sample_rate() / 100,
+ native_params.effects());
StartInputStreamCallbacks(params);
}
// Start output streaming using default output parameters and ensure that the
// callback sequence is sane.
-TEST_F(AudioAndroidTest, StartOutputStreamCallbacks) {
+TEST_F(AudioAndroidOutputTest, StartOutputStreamCallbacks) {
AudioParameters params = GetDefaultOutputStreamParameters();
StartOutputStreamCallbacks(params);
}
@@ -598,7 +643,7 @@ TEST_F(AudioAndroidTest, StartOutputStreamCallbacks) {
// select a 10ms buffer size instead of the default size and to open up the
// device in mono.
// TODO(henrika): possibly add support for more variations.
-TEST_F(AudioAndroidTest, StartOutputStreamCallbacksNonDefaultParameters) {
+TEST_F(AudioAndroidOutputTest, StartOutputStreamCallbacksNonDefaultParameters) {
AudioParameters native_params = GetDefaultOutputStreamParameters();
AudioParameters params(native_params.format(),
CHANNEL_LAYOUT_MONO,
@@ -612,7 +657,7 @@ TEST_F(AudioAndroidTest, StartOutputStreamCallbacksNonDefaultParameters) {
// the rendered audio sounds OK.
// NOTE: this test requires user interaction and is not designed to run as an
// automatized test on bots.
-TEST_F(AudioAndroidTest, DISABLED_RunOutputStreamWithFileAsSource) {
+TEST_F(AudioAndroidOutputTest, DISABLED_RunOutputStreamWithFileAsSource) {
AudioParameters params = GetDefaultOutputStreamParameters();
VLOG(1) << params;
AudioOutputStream* aos = audio_manager()->MakeAudioOutputStream(
@@ -639,7 +684,7 @@ TEST_F(AudioAndroidTest, DISABLED_RunOutputStreamWithFileAsSource) {
EXPECT_TRUE(aos->Open());
aos->SetVolume(1.0);
aos->Start(&source);
- LOG(INFO) << ">> Verify that the file is played out correctly...";
+ VLOG(0) << ">> Verify that the file is played out correctly...";
EXPECT_TRUE(event.TimedWait(TestTimeouts::action_max_timeout()));
aos->Stop();
aos->Close();
@@ -649,8 +694,8 @@ TEST_F(AudioAndroidTest, DISABLED_RunOutputStreamWithFileAsSource) {
// local audio file.
// NOTE: this test requires user interaction and is not designed to run as an
// automatized test on bots.
-TEST_F(AudioAndroidTest, DISABLED_RunSimplexInputStreamWithFileAsSink) {
- AudioParameters params = GetDefaultInputStreamParameters();
+TEST_P(AudioAndroidInputTest, DISABLED_RunSimplexInputStreamWithFileAsSink) {
+ AudioParameters params = GetInputStreamParameters();
VLOG(1) << params;
AudioInputStream* ais = audio_manager()->MakeAudioInputStream(
params, AudioManagerBase::kDefaultDeviceId);
@@ -666,7 +711,7 @@ TEST_F(AudioAndroidTest, DISABLED_RunSimplexInputStreamWithFileAsSink) {
EXPECT_TRUE(ais->Open());
ais->Start(&sink);
- LOG(INFO) << ">> Speak into the microphone to record audio...";
+ VLOG(0) << ">> Speak into the microphone to record audio...";
EXPECT_TRUE(event.TimedWait(TestTimeouts::action_max_timeout()));
ais->Stop();
ais->Close();
@@ -676,8 +721,8 @@ TEST_F(AudioAndroidTest, DISABLED_RunSimplexInputStreamWithFileAsSink) {
// streaming is active as well (reads zeros only).
// NOTE: this test requires user interaction and is not designed to run as an
// automatized test on bots.
-TEST_F(AudioAndroidTest, DISABLED_RunDuplexInputStreamWithFileAsSink) {
- AudioParameters in_params = GetDefaultInputStreamParameters();
+TEST_P(AudioAndroidInputTest, DISABLED_RunDuplexInputStreamWithFileAsSink) {
+ AudioParameters in_params = GetInputStreamParameters();
AudioInputStream* ais = audio_manager()->MakeAudioInputStream(
in_params, AudioManagerBase::kDefaultDeviceId);
EXPECT_TRUE(ais);
@@ -706,7 +751,7 @@ TEST_F(AudioAndroidTest, DISABLED_RunDuplexInputStreamWithFileAsSink) {
EXPECT_TRUE(aos->Open());
ais->Start(&sink);
aos->Start(&source);
- LOG(INFO) << ">> Speak into the microphone to record audio";
+ VLOG(0) << ">> Speak into the microphone to record audio";
EXPECT_TRUE(event.TimedWait(TestTimeouts::action_max_timeout()));
aos->Stop();
ais->Stop();
@@ -720,10 +765,10 @@ TEST_F(AudioAndroidTest, DISABLED_RunDuplexInputStreamWithFileAsSink) {
// printed out during the test.
// NOTE: this test requires user interaction and is not designed to run as an
// automatized test on bots.
-TEST_F(AudioAndroidTest,
+TEST_P(AudioAndroidInputTest,
DISABLED_RunSymmetricInputAndOutputStreamsInFullDuplex) {
// Get native audio parameters for the input side.
- AudioParameters default_input_params = GetDefaultInputStreamParameters();
+ AudioParameters default_input_params = GetInputStreamParameters();
// Modify the parameters so that both input and output can use the same
// parameters by selecting 10ms as buffer size. This will also ensure that
@@ -756,7 +801,7 @@ TEST_F(AudioAndroidTest,
aos->Start(&full_duplex);
VLOG(1) << "HINT: an estimate of the extra FIFO delay will be updated "
<< "once per second during this test.";
- LOG(INFO) << ">> Speak into the mic and listen to the audio in loopback...";
+ VLOG(0) << ">> Speak into the mic and listen to the audio in loopback...";
fflush(stdout);
base::PlatformThread::Sleep(base::TimeDelta::FromSeconds(20));
printf("\n");
@@ -766,4 +811,7 @@ TEST_F(AudioAndroidTest,
ais->Close();
}
+INSTANTIATE_TEST_CASE_P(AudioAndroidInputTest, AudioAndroidInputTest,
+ testing::ValuesIn(RunAudioRecordInputPathTests()));
+
} // namespace media
diff --git a/chromium/media/audio/android/audio_manager_android.cc b/chromium/media/audio/android/audio_manager_android.cc
index 04b226fa64f..3464d89a30f 100644
--- a/chromium/media/audio/android/audio_manager_android.cc
+++ b/chromium/media/audio/android/audio_manager_android.cc
@@ -4,16 +4,27 @@
#include "media/audio/android/audio_manager_android.h"
+#include "base/android/build_info.h"
+#include "base/android/jni_array.h"
+#include "base/android/jni_string.h"
+#include "base/android/scoped_java_ref.h"
#include "base/logging.h"
+#include "base/strings/string_number_conversions.h"
#include "jni/AudioManagerAndroid_jni.h"
+#include "media/audio/android/audio_record_input.h"
#include "media/audio/android/opensles_input.h"
#include "media/audio/android/opensles_output.h"
#include "media/audio/audio_manager.h"
#include "media/audio/audio_parameters.h"
-#include "media/audio/audio_util.h"
#include "media/audio/fake_audio_input_stream.h"
#include "media/base/channel_layout.h"
+using base::android::AppendJavaStringArrayToStringVector;
+using base::android::AttachCurrentThread;
+using base::android::ConvertJavaStringToUTF8;
+using base::android::ConvertUTF8ToJavaString;
+using base::android::ScopedJavaLocalRef;
+
namespace media {
static void AddDefaultDevice(AudioDeviceNames* device_names) {
@@ -32,20 +43,24 @@ static const int kAudioModeInCommunication = 0x00000003;
static const int kDefaultInputBufferSize = 1024;
static const int kDefaultOutputBufferSize = 2048;
-AudioManager* CreateAudioManager() {
- return new AudioManagerAndroid();
+AudioManager* CreateAudioManager(AudioLogFactory* audio_log_factory) {
+ return new AudioManagerAndroid(audio_log_factory);
}
-AudioManagerAndroid::AudioManagerAndroid() {
+AudioManagerAndroid::AudioManagerAndroid(AudioLogFactory* audio_log_factory)
+ : AudioManagerBase(audio_log_factory) {
SetMaxOutputStreamsAllowed(kMaxOutputStreams);
j_audio_manager_.Reset(
Java_AudioManagerAndroid_createAudioManagerAndroid(
base::android::AttachCurrentThread(),
- base::android::GetApplicationContext()));
+ base::android::GetApplicationContext(),
+ reinterpret_cast<intptr_t>(this)));
+ Init();
}
AudioManagerAndroid::~AudioManagerAndroid() {
+ Close();
Shutdown();
}
@@ -59,28 +74,52 @@ bool AudioManagerAndroid::HasAudioInputDevices() {
void AudioManagerAndroid::GetAudioInputDeviceNames(
AudioDeviceNames* device_names) {
+ // Always add default device parameters as first element.
AddDefaultDevice(device_names);
+
+ JNIEnv* env = AttachCurrentThread();
+ ScopedJavaLocalRef<jobjectArray> j_device_array =
+ Java_AudioManagerAndroid_getAudioInputDeviceNames(
+ env, j_audio_manager_.obj());
+ jsize len = env->GetArrayLength(j_device_array.obj());
+ AudioDeviceName device;
+ for (jsize i = 0; i < len; ++i) {
+ ScopedJavaLocalRef<jobject> j_device(
+ env, env->GetObjectArrayElement(j_device_array.obj(), i));
+ ScopedJavaLocalRef<jstring> j_device_name =
+ Java_AudioDeviceName_name(env, j_device.obj());
+ ConvertJavaStringToUTF8(env, j_device_name.obj(), &device.device_name);
+ ScopedJavaLocalRef<jstring> j_device_id =
+ Java_AudioDeviceName_id(env, j_device.obj());
+ ConvertJavaStringToUTF8(env, j_device_id.obj(), &device.unique_id);
+ device_names->push_back(device);
+ }
}
void AudioManagerAndroid::GetAudioOutputDeviceNames(
AudioDeviceNames* device_names) {
+ // TODO(henrika): enumerate using GetAudioInputDeviceNames().
AddDefaultDevice(device_names);
}
AudioParameters AudioManagerAndroid::GetInputStreamParameters(
const std::string& device_id) {
+ JNIEnv* env = AttachCurrentThread();
// Use mono as preferred number of input channels on Android to save
// resources. Using mono also avoids a driver issue seen on Samsung
// Galaxy S3 and S4 devices. See http://crbug.com/256851 for details.
ChannelLayout channel_layout = CHANNEL_LAYOUT_MONO;
int buffer_size = Java_AudioManagerAndroid_getMinInputFrameSize(
- base::android::AttachCurrentThread(), GetNativeOutputSampleRate(),
+ env, GetNativeOutputSampleRate(),
ChannelLayoutToChannelCount(channel_layout));
-
- return AudioParameters(
- AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout,
+ int effects = AudioParameters::NO_EFFECTS;
+ effects |= Java_AudioManagerAndroid_shouldUseAcousticEchoCanceler(env) ?
+ AudioParameters::ECHO_CANCELLER : AudioParameters::NO_EFFECTS;
+ AudioParameters params(
+ AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout, 0,
GetNativeOutputSampleRate(), 16,
- buffer_size <= 0 ? kDefaultInputBufferSize : buffer_size);
+ buffer_size <= 0 ? kDefaultInputBufferSize : buffer_size, effects);
+ return params;
}
AudioOutputStream* AudioManagerAndroid::MakeAudioOutputStream(
@@ -92,8 +131,13 @@ AudioOutputStream* AudioManagerAndroid::MakeAudioOutputStream(
std::string());
if (stream && output_stream_count() == 1) {
SetAudioMode(kAudioModeInCommunication);
- RegisterHeadsetReceiver();
}
+
+ {
+ base::AutoLock lock(streams_lock_);
+ streams_.insert(static_cast<OpenSLESOutputStream*>(stream));
+ }
+
return stream;
}
@@ -107,9 +151,10 @@ AudioInputStream* AudioManagerAndroid::MakeAudioInputStream(
void AudioManagerAndroid::ReleaseOutputStream(AudioOutputStream* stream) {
AudioManagerBase::ReleaseOutputStream(stream);
if (!output_stream_count()) {
- UnregisterHeadsetReceiver();
SetAudioMode(kAudioModeNormal);
}
+ base::AutoLock lock(streams_lock_);
+ streams_.erase(static_cast<OpenSLESOutputStream*>(stream));
}
void AudioManagerAndroid::ReleaseInputStream(AudioInputStream* stream) {
@@ -133,6 +178,9 @@ AudioOutputStream* AudioManagerAndroid::MakeLowLatencyOutputStream(
AudioInputStream* AudioManagerAndroid::MakeLinearInputStream(
const AudioParameters& params, const std::string& device_id) {
+ // TODO(henrika): add support for device selection if/when any client
+ // needs it.
+ DLOG_IF(ERROR, !device_id.empty()) << "Not implemented!";
DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format());
return new OpenSLESInputStream(this, params);
}
@@ -140,6 +188,26 @@ AudioInputStream* AudioManagerAndroid::MakeLinearInputStream(
AudioInputStream* AudioManagerAndroid::MakeLowLatencyInputStream(
const AudioParameters& params, const std::string& device_id) {
DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
+ DLOG_IF(ERROR, device_id.empty()) << "Invalid device ID!";
+ // Utilize the device ID to select the correct input device.
+ // Note that the input device is always associated with a certain output
+ // device, i.e., this selection does also switch the output device.
+ // All input and output streams will be affected by the device selection.
+ SetAudioDevice(device_id);
+
+ if (params.effects() != AudioParameters::NO_EFFECTS) {
+ // Platform effects can only be enabled through the AudioRecord path.
+ // An effect should only have been requested here if recommended by
+ // AudioManagerAndroid.shouldUse<Effect>.
+ //
+ // Creating this class requires Jelly Bean, which is already guaranteed by
+ // shouldUse<Effect>. Only DCHECK on that condition to allow tests to use
+ // the effect settings as a way to select the input path.
+ DCHECK_GE(base::android::BuildInfo::GetInstance()->sdk_int(), 16);
+ DVLOG(1) << "Creating AudioRecordInputStream";
+ return new AudioRecordInputStream(this, params);
+ }
+ DVLOG(1) << "Creating OpenSLESInputStream";
return new OpenSLESInputStream(this, params);
}
@@ -181,7 +249,7 @@ AudioParameters AudioManagerAndroid::GetPreferredOutputStreamParameters(
return AudioParameters(
AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout, input_channels,
- sample_rate, bits_per_sample, buffer_size);
+ sample_rate, bits_per_sample, buffer_size, AudioParameters::NO_EFFECTS);
}
// static
@@ -189,22 +257,53 @@ bool AudioManagerAndroid::RegisterAudioManager(JNIEnv* env) {
return RegisterNativesImpl(env);
}
-void AudioManagerAndroid::SetAudioMode(int mode) {
- Java_AudioManagerAndroid_setMode(
+void AudioManagerAndroid::Init() {
+ Java_AudioManagerAndroid_init(
base::android::AttachCurrentThread(),
- j_audio_manager_.obj(), mode);
+ j_audio_manager_.obj());
}
-void AudioManagerAndroid::RegisterHeadsetReceiver() {
- Java_AudioManagerAndroid_registerHeadsetReceiver(
+void AudioManagerAndroid::Close() {
+ Java_AudioManagerAndroid_close(
base::android::AttachCurrentThread(),
j_audio_manager_.obj());
}
-void AudioManagerAndroid::UnregisterHeadsetReceiver() {
- Java_AudioManagerAndroid_unregisterHeadsetReceiver(
+void AudioManagerAndroid::SetMute(JNIEnv* env, jobject obj, jboolean muted) {
+ GetMessageLoop()->PostTask(
+ FROM_HERE,
+ base::Bind(
+ &AudioManagerAndroid::DoSetMuteOnAudioThread,
+ base::Unretained(this),
+ muted));
+}
+
+void AudioManagerAndroid::DoSetMuteOnAudioThread(bool muted) {
+ base::AutoLock lock(streams_lock_);
+ for (OutputStreams::iterator it = streams_.begin();
+ it != streams_.end(); ++it) {
+ (*it)->SetMute(muted);
+ }
+}
+
+void AudioManagerAndroid::SetAudioMode(int mode) {
+ Java_AudioManagerAndroid_setMode(
base::android::AttachCurrentThread(),
- j_audio_manager_.obj());
+ j_audio_manager_.obj(), mode);
+}
+
+void AudioManagerAndroid::SetAudioDevice(const std::string& device_id) {
+ JNIEnv* env = AttachCurrentThread();
+
+ // Send the unique device ID to the Java audio manager and make the
+ // device switch. Provide an empty string to the Java audio manager
+ // if the default device is selected.
+ ScopedJavaLocalRef<jstring> j_device_id = ConvertUTF8ToJavaString(
+ env,
+ device_id == AudioManagerBase::kDefaultDeviceId ?
+ std::string() : device_id);
+ Java_AudioManagerAndroid_setDevice(
+ env, j_audio_manager_.obj(), j_device_id.obj());
}
int AudioManagerAndroid::GetNativeOutputSampleRate() {
diff --git a/chromium/media/audio/android/audio_manager_android.h b/chromium/media/audio/android/audio_manager_android.h
index ed2b2c3ce91..2900c0f8e29 100644
--- a/chromium/media/audio/android/audio_manager_android.h
+++ b/chromium/media/audio/android/audio_manager_android.h
@@ -5,16 +5,21 @@
#ifndef MEDIA_AUDIO_ANDROID_AUDIO_MANAGER_ANDROID_H_
#define MEDIA_AUDIO_ANDROID_AUDIO_MANAGER_ANDROID_H_
+#include <set>
+
#include "base/android/jni_android.h"
#include "base/gtest_prod_util.h"
+#include "base/synchronization/lock.h"
#include "media/audio/audio_manager_base.h"
namespace media {
+class OpenSLESOutputStream;
+
// Android implemention of AudioManager.
class MEDIA_EXPORT AudioManagerAndroid : public AudioManagerBase {
public:
- AudioManagerAndroid();
+ AudioManagerAndroid(AudioLogFactory* audio_log_factory);
// Implementation of AudioManager.
virtual bool HasAudioOutputDevices() OVERRIDE;
@@ -52,6 +57,8 @@ class MEDIA_EXPORT AudioManagerAndroid : public AudioManagerBase {
static bool RegisterAudioManager(JNIEnv* env);
+ void SetMute(JNIEnv* env, jobject obj, jboolean muted);
+
protected:
virtual ~AudioManagerAndroid();
@@ -60,20 +67,30 @@ class MEDIA_EXPORT AudioManagerAndroid : public AudioManagerBase {
const AudioParameters& input_params) OVERRIDE;
private:
+ void Init();
+ void Close();
void SetAudioMode(int mode);
- void RegisterHeadsetReceiver();
- void UnregisterHeadsetReceiver();
+ void SetAudioDevice(const std::string& device_id);
int GetNativeOutputSampleRate();
bool IsAudioLowLatencySupported();
int GetAudioLowLatencyOutputFrameSize();
int GetOptimalOutputFrameSize(int sample_rate, int channels);
+ void DoSetMuteOnAudioThread(bool muted);
+
// Allow the AudioAndroidTest to access private methods.
- FRIEND_TEST_ALL_PREFIXES(AudioAndroidTest, IsAudioLowLatencySupported);
+ FRIEND_TEST_ALL_PREFIXES(AudioAndroidOutputTest, IsAudioLowLatencySupported);
// Java AudioManager instance.
base::android::ScopedJavaGlobalRef<jobject> j_audio_manager_;
+ typedef std::set<OpenSLESOutputStream*> OutputStreams;
+ OutputStreams streams_;
+ // TODO(wjia): remove this lock once unit test modules are fixed to call
+ // AudioManager::MakeAudioOutputStream on the audio thread. For now, this
+ // lock is used to guard access to |streams_|.
+ base::Lock streams_lock_;
+
DISALLOW_COPY_AND_ASSIGN(AudioManagerAndroid);
};
diff --git a/chromium/media/audio/android/audio_record_input.cc b/chromium/media/audio/android/audio_record_input.cc
new file mode 100644
index 00000000000..15a0c3d3b7b
--- /dev/null
+++ b/chromium/media/audio/android/audio_record_input.cc
@@ -0,0 +1,131 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/android/audio_record_input.h"
+
+#include "base/logging.h"
+#include "jni/AudioRecordInput_jni.h"
+#include "media/audio/android/audio_manager_android.h"
+
+namespace media {
+
+AudioRecordInputStream::AudioRecordInputStream(
+ AudioManagerAndroid* audio_manager, const AudioParameters& params)
+ : audio_manager_(audio_manager),
+ callback_(NULL),
+ direct_buffer_address_(NULL) {
+ DVLOG(2) << __PRETTY_FUNCTION__;
+ DCHECK(params.IsValid());
+ j_audio_record_.Reset(
+ Java_AudioRecordInput_createAudioRecordInput(
+ base::android::AttachCurrentThread(),
+ reinterpret_cast<intptr_t>(this),
+ params.sample_rate(),
+ params.channels(),
+ params.bits_per_sample(),
+ params.GetBytesPerBuffer(),
+ params.effects() & AudioParameters::ECHO_CANCELLER));
+}
+
+AudioRecordInputStream::~AudioRecordInputStream() {
+ DVLOG(2) << __PRETTY_FUNCTION__;
+ DCHECK(thread_checker_.CalledOnValidThread());
+}
+
+void AudioRecordInputStream::CacheDirectBufferAddress(JNIEnv* env, jobject obj,
+ jobject byte_buffer) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ direct_buffer_address_ = static_cast<uint8*>(
+ env->GetDirectBufferAddress(byte_buffer));
+}
+
+// static
+bool AudioRecordInputStream::RegisterAudioRecordInput(JNIEnv* env) {
+ return RegisterNativesImpl(env);
+}
+
+void AudioRecordInputStream::OnData(JNIEnv* env, jobject obj, jint size,
+ jint hardware_delay_bytes) {
+ DCHECK(direct_buffer_address_);
+ // Passing zero as the volume parameter indicates there is no access to a
+ // hardware volume slider.
+ callback_->OnData(this, direct_buffer_address_, size, hardware_delay_bytes,
+ 0.0);
+}
+
+bool AudioRecordInputStream::Open() {
+ DVLOG(2) << __PRETTY_FUNCTION__;
+ DCHECK(thread_checker_.CalledOnValidThread());
+ return Java_AudioRecordInput_open(
+ base::android::AttachCurrentThread(), j_audio_record_.obj());
+}
+
+void AudioRecordInputStream::Start(AudioInputCallback* callback) {
+ DVLOG(2) << __PRETTY_FUNCTION__;
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(callback);
+
+ if (callback_) {
+ // Start() was already called.
+ DCHECK_EQ(callback_, callback);
+ return;
+ }
+ // The Java thread has not yet started, so we are free to set |callback_|.
+ callback_ = callback;
+
+ Java_AudioRecordInput_start(
+ base::android::AttachCurrentThread(), j_audio_record_.obj());
+}
+
+void AudioRecordInputStream::Stop() {
+ DVLOG(2) << __PRETTY_FUNCTION__;
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if (!callback_) {
+ // Start() was never called, or Stop() was already called.
+ return;
+ }
+
+ Java_AudioRecordInput_stop(
+ base::android::AttachCurrentThread(), j_audio_record_.obj());
+
+ // The Java thread must have been stopped at this point, so we are free to
+ // set |callback_|.
+ callback_->OnClose(this);
+ callback_ = NULL;
+}
+
+void AudioRecordInputStream::Close() {
+ DVLOG(2) << __PRETTY_FUNCTION__;
+ DCHECK(thread_checker_.CalledOnValidThread());
+ Stop();
+ DCHECK(!callback_);
+ Java_AudioRecordInput_close(
+ base::android::AttachCurrentThread(), j_audio_record_.obj());
+ audio_manager_->ReleaseInputStream(this);
+}
+
+double AudioRecordInputStream::GetMaxVolume() {
+ NOTIMPLEMENTED();
+ return 0.0;
+}
+
+void AudioRecordInputStream::SetVolume(double volume) {
+ NOTIMPLEMENTED();
+}
+
+double AudioRecordInputStream::GetVolume() {
+ NOTIMPLEMENTED();
+ return 0.0;
+}
+
+void AudioRecordInputStream::SetAutomaticGainControl(bool enabled) {
+ NOTIMPLEMENTED();
+}
+
+bool AudioRecordInputStream::GetAutomaticGainControl() {
+ NOTIMPLEMENTED();
+ return false;
+}
+
+} // namespace media
diff --git a/chromium/media/audio/android/audio_record_input.h b/chromium/media/audio/android/audio_record_input.h
new file mode 100644
index 00000000000..0a2578b1079
--- /dev/null
+++ b/chromium/media/audio/android/audio_record_input.h
@@ -0,0 +1,72 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_ANDROID_AUDIO_RECORD_INPUT_H_
+#define MEDIA_AUDIO_ANDROID_AUDIO_RECORD_INPUT_H_
+
+#include "base/android/jni_android.h"
+#include "base/threading/thread_checker.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/audio_parameters.h"
+
+namespace media {
+
+class AudioManagerAndroid;
+
+// Implements PCM audio input support for Android using the Java AudioRecord
+// interface. Most of the work is done by its Java counterpart in
+// AudioRecordInput.java. This class is created and lives on the Audio Manager
+// thread but recorded audio buffers are delivered on a thread managed by
+// the Java class.
+//
+// The Java class makes use of AudioEffect features which are first available
+// in Jelly Bean. It should not be instantiated running against earlier SDKs.
+class MEDIA_EXPORT AudioRecordInputStream : public AudioInputStream {
+ public:
+ AudioRecordInputStream(AudioManagerAndroid* manager,
+ const AudioParameters& params);
+
+ virtual ~AudioRecordInputStream();
+
+ // Implementation of AudioInputStream.
+ virtual bool Open() OVERRIDE;
+ virtual void Start(AudioInputCallback* callback) OVERRIDE;
+ virtual void Stop() OVERRIDE;
+ virtual void Close() OVERRIDE;
+ virtual double GetMaxVolume() OVERRIDE;
+ virtual void SetVolume(double volume) OVERRIDE;
+ virtual double GetVolume() OVERRIDE;
+ virtual void SetAutomaticGainControl(bool enabled) OVERRIDE;
+ virtual bool GetAutomaticGainControl() OVERRIDE;
+
+ static bool RegisterAudioRecordInput(JNIEnv* env);
+
+ // Called from Java when data is available.
+ void OnData(JNIEnv* env, jobject obj, jint size, jint hardware_delay_bytes);
+
+ // Called from Java so that we can cache the address of the Java-managed
+ // |byte_buffer| in |direct_buffer_address_|.
+ void CacheDirectBufferAddress(JNIEnv* env, jobject obj, jobject byte_buffer);
+
+ private:
+ base::ThreadChecker thread_checker_;
+ AudioManagerAndroid* audio_manager_;
+
+ // Java AudioRecordInput instance.
+ base::android::ScopedJavaGlobalRef<jobject> j_audio_record_;
+
+ // This is the only member accessed by both the Audio Manager and Java
+ // threads. Explanations for why we do not require explicit synchronization
+ // are given in the implementation.
+ AudioInputCallback* callback_;
+
+ // Owned by j_audio_record_.
+ uint8* direct_buffer_address_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioRecordInputStream);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_ANDROID_AUDIO_RECORD_INPUT_H_
diff --git a/chromium/media/audio/android/opensles_input.cc b/chromium/media/audio/android/opensles_input.cc
index a0e4ce3b987..e51ba4f3a97 100644
--- a/chromium/media/audio/android/opensles_input.cc
+++ b/chromium/media/audio/android/opensles_input.cc
@@ -28,7 +28,7 @@ OpenSLESInputStream::OpenSLESInputStream(AudioManagerAndroid* audio_manager,
active_buffer_index_(0),
buffer_size_bytes_(0),
started_(false) {
- DVLOG(2) << "OpenSLESInputStream::OpenSLESInputStream()";
+ DVLOG(2) << __PRETTY_FUNCTION__;
format_.formatType = SL_DATAFORMAT_PCM;
format_.numChannels = static_cast<SLuint32>(params.channels());
// Provides sampling rate in milliHertz to OpenSLES.
@@ -49,7 +49,7 @@ OpenSLESInputStream::OpenSLESInputStream(AudioManagerAndroid* audio_manager,
}
OpenSLESInputStream::~OpenSLESInputStream() {
- DVLOG(2) << "OpenSLESInputStream::~OpenSLESInputStream()";
+ DVLOG(2) << __PRETTY_FUNCTION__;
DCHECK(thread_checker_.CalledOnValidThread());
DCHECK(!recorder_object_.Get());
DCHECK(!engine_object_.Get());
@@ -59,7 +59,7 @@ OpenSLESInputStream::~OpenSLESInputStream() {
}
bool OpenSLESInputStream::Open() {
- DVLOG(2) << "OpenSLESInputStream::Open()";
+ DVLOG(2) << __PRETTY_FUNCTION__;
DCHECK(thread_checker_.CalledOnValidThread());
if (engine_object_.Get())
return false;
@@ -73,7 +73,7 @@ bool OpenSLESInputStream::Open() {
}
void OpenSLESInputStream::Start(AudioInputCallback* callback) {
- DVLOG(2) << "OpenSLESInputStream::Start()";
+ DVLOG(2) << __PRETTY_FUNCTION__;
DCHECK(thread_checker_.CalledOnValidThread());
DCHECK(callback);
DCHECK(recorder_);
@@ -116,7 +116,7 @@ void OpenSLESInputStream::Start(AudioInputCallback* callback) {
}
void OpenSLESInputStream::Stop() {
- DVLOG(2) << "OpenSLESInputStream::Stop()";
+ DVLOG(2) << __PRETTY_FUNCTION__;
DCHECK(thread_checker_.CalledOnValidThread());
if (!started_)
return;
@@ -135,7 +135,7 @@ void OpenSLESInputStream::Stop() {
}
void OpenSLESInputStream::Close() {
- DVLOG(2) << "OpenSLESInputStream::Close()";
+ DVLOG(2) << __PRETTY_FUNCTION__;
DCHECK(thread_checker_.CalledOnValidThread());
// Stop the stream if it is still recording.
@@ -170,7 +170,9 @@ double OpenSLESInputStream::GetMaxVolume() {
return 0.0;
}
-void OpenSLESInputStream::SetVolume(double volume) { NOTIMPLEMENTED(); }
+void OpenSLESInputStream::SetVolume(double volume) {
+ NOTIMPLEMENTED();
+}
double OpenSLESInputStream::GetVolume() {
NOTIMPLEMENTED();
diff --git a/chromium/media/audio/android/opensles_input.h b/chromium/media/audio/android/opensles_input.h
index e05831c6712..cb07d51f78b 100644
--- a/chromium/media/audio/android/opensles_input.h
+++ b/chromium/media/audio/android/opensles_input.h
@@ -21,8 +21,8 @@ class AudioManagerAndroid;
// Implements PCM audio input support for Android using the OpenSLES API.
// This class is created and lives on the Audio Manager thread but recorded
-// audio buffers are given to us from an internal OpenSLES audio thread.
-// All public methods should be called on the Audio Manager thread.
+// audio buffers are delivered on an internal OpenSLES audio thread. All public
+// methods should be called on the Audio Manager thread.
class OpenSLESInputStream : public AudioInputStream {
public:
static const int kMaxNumOfBuffersInQueue = 2;
diff --git a/chromium/media/audio/android/opensles_output.cc b/chromium/media/audio/android/opensles_output.cc
index 5643f833c3d..b71680f0a7e 100644
--- a/chromium/media/audio/android/opensles_output.cc
+++ b/chromium/media/audio/android/opensles_output.cc
@@ -28,6 +28,7 @@ OpenSLESOutputStream::OpenSLESOutputStream(AudioManagerAndroid* manager,
active_buffer_index_(0),
buffer_size_bytes_(0),
started_(false),
+ muted_(false),
volume_(1.0) {
DVLOG(2) << "OpenSLESOutputStream::OpenSLESOutputStream()";
format_.formatType = SL_DATAFORMAT_PCM;
@@ -128,6 +129,7 @@ void OpenSLESOutputStream::Stop() {
DCHECK_EQ(0u, buffer_queue_state.index);
#endif
+ callback_ = NULL;
started_ = false;
}
@@ -172,6 +174,12 @@ void OpenSLESOutputStream::GetVolume(double* volume) {
*volume = static_cast<double>(volume_);
}
+void OpenSLESOutputStream::SetMute(bool muted) {
+ DVLOG(2) << "OpenSLESOutputStream::SetMute(" << muted << ")";
+ DCHECK(thread_checker_.CalledOnValidThread());
+ muted_ = muted;
+}
+
bool OpenSLESOutputStream::CreatePlayer() {
DCHECK(thread_checker_.CalledOnValidThread());
DCHECK(!engine_object_.Get());
@@ -324,7 +332,7 @@ void OpenSLESOutputStream::FillBufferQueueNoLock() {
// Note: If the internal representation ever changes from 16-bit PCM to
// raw float, the data must be clipped and sanitized since it may come
// from an untrusted source such as NaCl.
- audio_bus_->Scale(volume_);
+ audio_bus_->Scale(muted_ ? 0.0f : volume_);
audio_bus_->ToInterleaved(frames_filled,
format_.bitsPerSample / 8,
audio_data_[active_buffer_index_]);
diff --git a/chromium/media/audio/android/opensles_output.h b/chromium/media/audio/android/opensles_output.h
index 7232d5da5f7..623b0193894 100644
--- a/chromium/media/audio/android/opensles_output.h
+++ b/chromium/media/audio/android/opensles_output.h
@@ -40,6 +40,10 @@ class OpenSLESOutputStream : public AudioOutputStream {
virtual void SetVolume(double volume) OVERRIDE;
virtual void GetVolume(double* volume) OVERRIDE;
+  // Set the value of |muted_|. It does not affect |volume_|, which can be
+  // obtained by calling GetVolume(). See comments for |muted_| below.
+ void SetMute(bool muted);
+
private:
bool CreatePlayer();
@@ -96,6 +100,12 @@ class OpenSLESOutputStream : public AudioOutputStream {
bool started_;
+ // Volume control coming from hardware. It overrides |volume_| when it's
+ // true. Otherwise, use |volume_| for scaling.
+ // This is needed because platform voice volume never goes to zero in
+ // COMMUNICATION mode on Android.
+ bool muted_;
+
// Volume level from 0 to 1.
float volume_;
diff --git a/chromium/media/audio/audio_device_thread.cc b/chromium/media/audio/audio_device_thread.cc
index d5c1bbcebdc..daf908556d8 100644
--- a/chromium/media/audio/audio_device_thread.cc
+++ b/chromium/media/audio/audio_device_thread.cc
@@ -12,7 +12,6 @@
#include "base/message_loop/message_loop.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread_restrictions.h"
-#include "media/audio/audio_util.h"
#include "media/base/audio_bus.h"
using base::PlatformThread;
@@ -29,7 +28,8 @@ class AudioDeviceThread::Thread
public:
Thread(AudioDeviceThread::Callback* callback,
base::SyncSocket::Handle socket,
- const char* thread_name);
+ const char* thread_name,
+ bool synchronized_buffers);
void Start();
@@ -55,6 +55,7 @@ class AudioDeviceThread::Thread
base::CancelableSyncSocket socket_;
base::Lock callback_lock_;
const char* thread_name_;
+ const bool synchronized_buffers_;
DISALLOW_COPY_AND_ASSIGN(Thread);
};
@@ -68,10 +69,12 @@ AudioDeviceThread::~AudioDeviceThread() { DCHECK(!thread_.get()); }
void AudioDeviceThread::Start(AudioDeviceThread::Callback* callback,
base::SyncSocket::Handle socket,
- const char* thread_name) {
+ const char* thread_name,
+ bool synchronized_buffers) {
base::AutoLock auto_lock(thread_lock_);
- CHECK(thread_.get() == NULL);
- thread_ = new AudioDeviceThread::Thread(callback, socket, thread_name);
+ CHECK(!thread_);
+ thread_ = new AudioDeviceThread::Thread(
+ callback, socket, thread_name, synchronized_buffers);
thread_->Start();
}
@@ -85,17 +88,19 @@ void AudioDeviceThread::Stop(base::MessageLoop* loop_for_join) {
bool AudioDeviceThread::IsStopped() {
base::AutoLock auto_lock(thread_lock_);
- return thread_.get() == NULL;
+ return !thread_;
}
// AudioDeviceThread::Thread implementation
AudioDeviceThread::Thread::Thread(AudioDeviceThread::Callback* callback,
base::SyncSocket::Handle socket,
- const char* thread_name)
+ const char* thread_name,
+ bool synchronized_buffers)
: thread_(),
callback_(callback),
socket_(socket),
- thread_name_(thread_name) {
+ thread_name_(thread_name),
+ synchronized_buffers_(synchronized_buffers) {
}
AudioDeviceThread::Thread::~Thread() {
@@ -157,6 +162,7 @@ void AudioDeviceThread::Thread::ThreadMain() {
}
void AudioDeviceThread::Thread::Run() {
+ uint32 buffer_index = 0;
while (true) {
int pending_data = 0;
size_t bytes_read = socket_.Receive(&pending_data, sizeof(pending_data));
@@ -165,9 +171,21 @@ void AudioDeviceThread::Thread::Run() {
break;
}
- base::AutoLock auto_lock(callback_lock_);
- if (callback_)
- callback_->Process(pending_data);
+ {
+ base::AutoLock auto_lock(callback_lock_);
+ if (callback_)
+ callback_->Process(pending_data);
+ }
+
+ // Let the other end know which buffer we just filled. The buffer index is
+ // used to ensure the other end is getting the buffer it expects. For more
+ // details on how this works see AudioSyncReader::WaitUntilDataIsReady().
+ if (synchronized_buffers_) {
+ ++buffer_index;
+ size_t bytes_sent = socket_.Send(&buffer_index, sizeof(buffer_index));
+ if (bytes_sent != sizeof(buffer_index))
+ break;
+ }
}
}
diff --git a/chromium/media/audio/audio_device_thread.h b/chromium/media/audio/audio_device_thread.h
index 976f88359ba..7a1a6ed8c4c 100644
--- a/chromium/media/audio/audio_device_thread.h
+++ b/chromium/media/audio/audio_device_thread.h
@@ -12,7 +12,6 @@
#include "base/sync_socket.h"
#include "base/synchronization/lock.h"
#include "media/audio/audio_parameters.h"
-#include "media/audio/shared_memory_util.h"
#include "media/base/media_export.h"
namespace base {
@@ -74,10 +73,13 @@ class MEDIA_EXPORT AudioDeviceThread {
AudioDeviceThread();
~AudioDeviceThread();
- // Starts the audio thread. The thread must not already be running.
+ // Starts the audio thread. The thread must not already be running. If
+  // |synchronized_buffers| is set, the browser expects to be notified via the
+ // |socket| every time AudioDeviceThread::Process() completes.
void Start(AudioDeviceThread::Callback* callback,
base::SyncSocket::Handle socket,
- const char* thread_name);
+ const char* thread_name,
+ bool synchronized_buffers);
// This tells the audio thread to stop and clean up the data.
// The method can stop the thread synchronously or asynchronously.
diff --git a/chromium/media/audio/audio_input_controller_unittest.cc b/chromium/media/audio/audio_input_controller_unittest.cc
index 6388cbf975b..a7bb600aaf4 100644
--- a/chromium/media/audio/audio_input_controller_unittest.cc
+++ b/chromium/media/audio/audio_input_controller_unittest.cc
@@ -80,7 +80,7 @@ TEST_F(AudioInputControllerTest, CreateAndClose) {
EXPECT_CALL(event_handler, OnCreated(NotNull()))
.WillOnce(QuitMessageLoop(&message_loop_));
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
AudioParameters params(AudioParameters::AUDIO_FAKE, kChannelLayout,
kSampleRate, kBitsPerSample, kSamplesPerPacket);
@@ -118,7 +118,7 @@ TEST_F(AudioInputControllerTest, RecordAndClose) {
.WillRepeatedly(CheckCountAndPostQuitTask(&count, 10,
message_loop_.message_loop_proxy()));
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
AudioParameters params(AudioParameters::AUDIO_FAKE, kChannelLayout,
kSampleRate, kBitsPerSample, kSamplesPerPacket);
@@ -168,7 +168,7 @@ TEST_F(AudioInputControllerTest, RecordAndError) {
.Times(Exactly(1))
.WillOnce(QuitMessageLoop(&message_loop_));
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
AudioParameters params(AudioParameters::AUDIO_FAKE, kChannelLayout,
kSampleRate, kBitsPerSample, kSamplesPerPacket);
@@ -205,7 +205,7 @@ TEST_F(AudioInputControllerTest, SamplesPerPacketTooLarge) {
EXPECT_CALL(event_handler, OnCreated(NotNull()))
.Times(Exactly(0));
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
AudioParameters params(AudioParameters::AUDIO_FAKE,
kChannelLayout,
kSampleRate,
@@ -231,7 +231,7 @@ TEST_F(AudioInputControllerTest, CloseTwice) {
EXPECT_CALL(event_handler, OnRecording(NotNull()))
.Times(Exactly(1));
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
AudioParameters params(AudioParameters::AUDIO_FAKE,
kChannelLayout,
kSampleRate,
diff --git a/chromium/media/audio/audio_input_device.cc b/chromium/media/audio/audio_input_device.cc
index d7685840ecf..d1a6ab89f9f 100644
--- a/chromium/media/audio/audio_input_device.cc
+++ b/chromium/media/audio/audio_input_device.cc
@@ -138,10 +138,10 @@ void AudioInputDevice::OnStreamCreated(
return;
DCHECK(audio_thread_.IsStopped());
- audio_callback_.reset(
- new AudioInputDevice::AudioThreadCallback(
- audio_parameters_, handle, length, total_segments, callback_));
- audio_thread_.Start(audio_callback_.get(), socket_handle, "AudioInputDevice");
+ audio_callback_.reset(new AudioInputDevice::AudioThreadCallback(
+ audio_parameters_, handle, length, total_segments, callback_));
+ audio_thread_.Start(
+ audio_callback_.get(), socket_handle, "AudioInputDevice", false);
state_ = RECORDING;
ipc_->RecordStream();
diff --git a/chromium/media/audio/audio_input_unittest.cc b/chromium/media/audio/audio_input_unittest.cc
index 8adb746ee86..838cab3867a 100644
--- a/chromium/media/audio/audio_input_unittest.cc
+++ b/chromium/media/audio/audio_input_unittest.cc
@@ -77,7 +77,7 @@ static AudioInputStream* CreateTestAudioInputStream(AudioManager* audio_man) {
// Test that AudioInputStream rejects out of range parameters.
TEST(AudioInputTest, SanityOnMakeParams) {
- scoped_ptr<AudioManager> audio_man(AudioManager::Create());
+ scoped_ptr<AudioManager> audio_man(AudioManager::CreateForTesting());
if (!CanRunAudioTests(audio_man.get()))
return;
@@ -111,7 +111,7 @@ TEST(AudioInputTest, SanityOnMakeParams) {
// Test create and close of an AudioInputStream without recording audio.
TEST(AudioInputTest, CreateAndClose) {
- scoped_ptr<AudioManager> audio_man(AudioManager::Create());
+ scoped_ptr<AudioManager> audio_man(AudioManager::CreateForTesting());
if (!CanRunAudioTests(audio_man.get()))
return;
AudioInputStream* ais = CreateTestAudioInputStream(audio_man.get());
@@ -126,7 +126,7 @@ TEST(AudioInputTest, CreateAndClose) {
#endif
// Test create, open and close of an AudioInputStream without recording audio.
TEST(AudioInputTest, MAYBE_OpenAndClose) {
- scoped_ptr<AudioManager> audio_man(AudioManager::Create());
+ scoped_ptr<AudioManager> audio_man(AudioManager::CreateForTesting());
if (!CanRunAudioTests(audio_man.get()))
return;
AudioInputStream* ais = CreateTestAudioInputStream(audio_man.get());
@@ -142,7 +142,7 @@ TEST(AudioInputTest, MAYBE_OpenAndClose) {
#endif
// Test create, open, stop and close of an AudioInputStream without recording.
TEST(AudioInputTest, MAYBE_OpenStopAndClose) {
- scoped_ptr<AudioManager> audio_man(AudioManager::Create());
+ scoped_ptr<AudioManager> audio_man(AudioManager::CreateForTesting());
if (!CanRunAudioTests(audio_man.get()))
return;
AudioInputStream* ais = CreateTestAudioInputStream(audio_man.get());
@@ -159,7 +159,7 @@ TEST(AudioInputTest, MAYBE_OpenStopAndClose) {
#endif
// Test a normal recording sequence using an AudioInputStream.
TEST(AudioInputTest, MAYBE_Record) {
- scoped_ptr<AudioManager> audio_man(AudioManager::Create());
+ scoped_ptr<AudioManager> audio_man(AudioManager::CreateForTesting());
if (!CanRunAudioTests(audio_man.get()))
return;
base::MessageLoop message_loop(base::MessageLoop::TYPE_DEFAULT);
diff --git a/chromium/media/audio/audio_input_volume_unittest.cc b/chromium/media/audio/audio_input_volume_unittest.cc
index 570c045570e..e89d106f7ed 100644
--- a/chromium/media/audio/audio_input_volume_unittest.cc
+++ b/chromium/media/audio/audio_input_volume_unittest.cc
@@ -8,7 +8,6 @@
#include "base/memory/scoped_ptr.h"
#include "media/audio/audio_io.h"
#include "media/audio/audio_manager_base.h"
-#include "media/audio/audio_util.h"
#include "testing/gtest/include/gtest/gtest.h"
#if defined(OS_WIN)
@@ -39,7 +38,7 @@ double GetVolumeAfterSetVolumeOnLinux(AudioInputStream* ais,
class AudioInputVolumeTest : public ::testing::Test {
protected:
AudioInputVolumeTest()
- : audio_manager_(AudioManager::Create())
+ : audio_manager_(AudioManager::CreateForTesting())
#if defined(OS_WIN)
, com_init_(base::win::ScopedCOMInitializer::kMTA)
#endif
diff --git a/chromium/media/audio/audio_logging.h b/chromium/media/audio/audio_logging.h
new file mode 100644
index 00000000000..1d8366bad75
--- /dev/null
+++ b/chromium/media/audio/audio_logging.h
@@ -0,0 +1,84 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_AUDIO_LOGGING_H_
+#define MEDIA_AUDIO_AUDIO_LOGGING_H_
+
+#include <string>
+
+#include "base/memory/scoped_ptr.h"
+
+namespace media {
+class AudioParameters;
+
+// AudioLog logs state information about an active audio component. Each method
+// takes a |component_id| along with method specific information. Its methods
+// are safe to call from any thread.
+class AudioLog {
+ public:
+ virtual ~AudioLog() {}
+
+ // Called when an audio component is created. |params| are the parameters of
+ // the created stream. |input_device_id| and |output_device_id| are the
+ // respective device ids for input and output. Either one or both may be
+ // specified.
+ virtual void OnCreated(int component_id,
+ const media::AudioParameters& params,
+ const std::string& input_device_id,
+ const std::string& output_device_id) = 0;
+
+ // Called when an audio component is started, generally this is synonymous
+ // with "playing."
+ virtual void OnStarted(int component_id) = 0;
+
+ // Called when an audio component is stopped, generally this is synonymous
+ // with "paused."
+ virtual void OnStopped(int component_id) = 0;
+
+ // Called when an audio component is closed, generally this is synonymous
+ // with "deleted."
+ virtual void OnClosed(int component_id) = 0;
+
+ // Called when an audio component encounters an error.
+ virtual void OnError(int component_id) = 0;
+
+ // Called when an audio component changes volume. |volume| is the new volume.
+ virtual void OnSetVolume(int component_id, double volume) = 0;
+};
+
+// AudioLogFactory dispenses AudioLog instances to owning classes for tracking
+// AudioComponent behavior. All AudioComponents have the concept of an owning
+// class:
+//
+// - AudioInputRendererHost for AudioInputController
+// - AudioRendererHost for AudioOutputController
+// - AudioOutputDispatcherImpl for AudioOutputStream
+//
+// Each of these owning classes may own multiple instances of each component, as
+// such each AudioLog supports logging for multiple instances.
+class AudioLogFactory {
+ public:
+ enum AudioComponent {
+ // Input controllers have a 1:1 mapping with streams, so there's no need to
+ // track both controllers and streams.
+ AUDIO_INPUT_CONTROLLER,
+ // Output controllers may or may not be backed by an active stream, so we
+ // need to track both controllers and streams.
+ AUDIO_OUTPUT_CONTROLLER,
+ AUDIO_OUTPUT_STREAM,
+ AUDIO_COMPONENT_MAX
+ };
+
+ // Create a new AudioLog object for tracking the behavior for one or more
+ // instances of the given component. Each instance of an "owning" class must
+ // create its own AudioLog.
+ virtual scoped_ptr<AudioLog> CreateAudioLog(AudioComponent component) = 0;
+
+ protected:
+ virtual ~AudioLogFactory() {}
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_AUDIO_LOGGING_H_
diff --git a/chromium/media/audio/audio_low_latency_input_output_unittest.cc b/chromium/media/audio/audio_low_latency_input_output_unittest.cc
index a616761294d..c0cfa6937cf 100644
--- a/chromium/media/audio/audio_low_latency_input_output_unittest.cc
+++ b/chromium/media/audio/audio_low_latency_input_output_unittest.cc
@@ -14,12 +14,13 @@
#include "build/build_config.h"
#include "media/audio/audio_io.h"
#include "media/audio/audio_manager_base.h"
+#include "media/audio/fake_audio_log_factory.h"
#include "media/base/seekable_buffer.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
-#if defined(OS_LINUX) || defined(OS_OPENBSD)
-#include "media/audio/linux/audio_manager_linux.h"
+#if defined(USE_ALSA)
+#include "media/audio/alsa/audio_manager_alsa.h"
#elif defined(OS_MACOSX)
#include "media/audio/mac/audio_manager_mac.h"
#elif defined(OS_WIN)
@@ -27,18 +28,22 @@
#include "media/audio/win/core_audio_util_win.h"
#elif defined(OS_ANDROID)
#include "media/audio/android/audio_manager_android.h"
+#else
+#include "media/audio/fake_audio_manager.h"
#endif
namespace media {
-#if defined(OS_LINUX) || defined(OS_OPENBSD)
-typedef AudioManagerLinux AudioManagerAnyPlatform;
+#if defined(USE_ALSA)
+typedef AudioManagerAlsa AudioManagerAnyPlatform;
#elif defined(OS_MACOSX)
typedef AudioManagerMac AudioManagerAnyPlatform;
#elif defined(OS_WIN)
typedef AudioManagerWin AudioManagerAnyPlatform;
#elif defined(OS_ANDROID)
typedef AudioManagerAndroid AudioManagerAnyPlatform;
+#else
+typedef FakeAudioManager AudioManagerAnyPlatform;
#endif
// Limits the number of delay measurements we can store in an array and
@@ -80,7 +85,7 @@ struct AudioDelayState {
// the main thread instead of the audio thread.
class MockAudioManager : public AudioManagerAnyPlatform {
public:
- MockAudioManager() {}
+ MockAudioManager() : AudioManagerAnyPlatform(&fake_audio_log_factory_) {}
virtual ~MockAudioManager() {}
virtual scoped_refptr<base::MessageLoopProxy> GetMessageLoop() OVERRIDE {
@@ -88,6 +93,7 @@ class MockAudioManager : public AudioManagerAnyPlatform {
}
private:
+ FakeAudioLogFactory fake_audio_log_factory_;
DISALLOW_COPY_AND_ASSIGN(MockAudioManager);
};
@@ -156,9 +162,9 @@ class FullDuplexAudioSinkSource
EXPECT_TRUE(PathService::Get(base::DIR_EXE, &file_name));
file_name = file_name.AppendASCII(kDelayValuesFileName);
- FILE* text_file = file_util::OpenFile(file_name, "wt");
+ FILE* text_file = base::OpenFile(file_name, "wt");
DLOG_IF(ERROR, !text_file) << "Failed to open log file.";
- LOG(INFO) << ">> Output file " << file_name.value() << " has been created.";
+ VLOG(0) << ">> Output file " << file_name.value() << " has been created.";
// Write the array which contains time-stamps, buffer size and
// audio delays values to a text file.
@@ -174,7 +180,7 @@ class FullDuplexAudioSinkSource
++elements_written;
}
- file_util::CloseFile(text_file);
+ base::CloseFile(text_file);
}
// AudioInputStream::AudioInputCallback.
@@ -421,10 +427,10 @@ TEST_F(AudioLowLatencyInputOutputTest, DISABLED_FullDuplexDelayMeasurement) {
FullDuplexAudioSinkSource full_duplex(
aisw.sample_rate(), aisw.samples_per_packet(), aisw.channels());
- LOG(INFO) << ">> You should now be able to hear yourself in loopback...";
- DLOG(INFO) << " sample_rate : " << aisw.sample_rate();
- DLOG(INFO) << " samples_per_packet: " << aisw.samples_per_packet();
- DLOG(INFO) << " channels : " << aisw.channels();
+ VLOG(0) << ">> You should now be able to hear yourself in loopback...";
+ DVLOG(0) << " sample_rate : " << aisw.sample_rate();
+ DVLOG(0) << " samples_per_packet: " << aisw.samples_per_packet();
+ DVLOG(0) << " channels : " << aisw.channels();
ais->Start(&full_duplex);
aos->Start(&full_duplex);
diff --git a/chromium/media/audio/audio_manager.cc b/chromium/media/audio/audio_manager.cc
index 3f49a45ad87..03eeb171d70 100644
--- a/chromium/media/audio/audio_manager.cc
+++ b/chromium/media/audio/audio_manager.cc
@@ -4,12 +4,12 @@
#include "media/audio/audio_manager.h"
-#include "base/at_exit.h"
-#include "base/atomicops.h"
#include "base/bind.h"
#include "base/bind_helpers.h"
+#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
+#include "media/audio/fake_audio_log_factory.h"
namespace media {
namespace {
@@ -17,24 +17,30 @@ AudioManager* g_last_created = NULL;
}
// Forward declaration of the platform specific AudioManager factory function.
-AudioManager* CreateAudioManager();
+AudioManager* CreateAudioManager(AudioLogFactory* audio_log_factory);
-AudioManager::AudioManager() {
-}
+AudioManager::AudioManager() {}
AudioManager::~AudioManager() {
- CHECK(g_last_created == NULL || g_last_created == this);
+ CHECK(!g_last_created || g_last_created == this);
g_last_created = NULL;
}
// static
-AudioManager* AudioManager::Create() {
- CHECK(g_last_created == NULL);
- g_last_created = CreateAudioManager();
+AudioManager* AudioManager::Create(AudioLogFactory* audio_log_factory) {
+ CHECK(!g_last_created);
+ g_last_created = CreateAudioManager(audio_log_factory);
return g_last_created;
}
// static
+AudioManager* AudioManager::CreateForTesting() {
+ static base::LazyInstance<FakeAudioLogFactory>::Leaky fake_log_factory =
+ LAZY_INSTANCE_INITIALIZER;
+ return Create(fake_log_factory.Pointer());
+}
+
+// static
AudioManager* AudioManager::Get() {
return g_last_created;
}
diff --git a/chromium/media/audio/audio_manager.h b/chromium/media/audio/audio_manager.h
index 891d2a26589..0ca468ed4dd 100644
--- a/chromium/media/audio/audio_manager.h
+++ b/chromium/media/audio/audio_manager.h
@@ -11,6 +11,7 @@
#include "base/memory/ref_counted.h"
#include "base/strings/string16.h"
#include "media/audio/audio_device_name.h"
+#include "media/audio/audio_logging.h"
#include "media/audio/audio_parameters.h"
namespace base {
@@ -23,16 +24,19 @@ namespace media {
class AudioInputStream;
class AudioOutputStream;
-// Manages all audio resources. In particular it owns the AudioOutputStream
-// objects. Provides some convenience functions that avoid the need to provide
-// iterators over the existing streams.
+// Manages all audio resources. Provides some convenience functions that avoid
+// the need to provide iterators over the existing streams.
class MEDIA_EXPORT AudioManager {
- public:
- virtual ~AudioManager();
+ public:
+ virtual ~AudioManager();
- // Use to construct the audio manager.
- // NOTE: There should only be one instance.
- static AudioManager* Create();
+ // Construct the audio manager; only one instance is allowed. The manager
+ // will forward CreateAudioLog() calls to the provided AudioLogFactory; as
+ // such |audio_log_factory| must outlive the AudioManager.
+ static AudioManager* Create(AudioLogFactory* audio_log_factory);
+
+ // Similar to Create() except uses a FakeAudioLogFactory for testing.
+ static AudioManager* CreateForTesting();
// Returns the pointer to the last created instance, or NULL if not yet
// created. This is a utility method for the code outside of media directory,
@@ -50,7 +54,7 @@ class MEDIA_EXPORT AudioManager {
// Returns a human readable string for the model/make of the active audio
// input device for this computer.
- virtual string16 GetAudioInputDeviceModel() = 0;
+ virtual base::string16 GetAudioInputDeviceModel() = 0;
// Opens the platform default audio input settings UI.
// Note: This could invoke an external application/preferences pane, so
@@ -62,10 +66,16 @@ class MEDIA_EXPORT AudioManager {
// which must initially be empty. It is not guaranteed that all the
// devices in the list support all formats and sample rates for
// recording.
+ //
+ // Not threadsafe; in production this should only be called from the
+ // Audio IO thread (see GetMessageLoop).
virtual void GetAudioInputDeviceNames(AudioDeviceNames* device_names) = 0;
// Appends a list of available output devices to |device_names|,
// which must initially be empty.
+ //
+ // Not threadsafe; in production this should only be called from the
+ // Audio IO thread (see GetMessageLoop).
virtual void GetAudioOutputDeviceNames(AudioDeviceNames* device_names) = 0;
// Factory for all the supported stream formats. |params| defines parameters
@@ -169,6 +179,16 @@ class MEDIA_EXPORT AudioManager {
virtual std::string GetAssociatedOutputDeviceID(
const std::string& input_device_id) = 0;
+ // Create a new AudioLog object for tracking the behavior for one or more
+ // instances of the given component. See AudioLogFactory for more details.
+ virtual scoped_ptr<AudioLog> CreateAudioLog(
+ AudioLogFactory::AudioComponent component) = 0;
+
+ // Called when a component has detected a OS level audio wedge. Shuts down
+ // all active audio streams and then restarts them transparently. See
+ // http://crbug.com/160920
+ virtual void FixWedgedAudio() = 0;
+
protected:
AudioManager();
diff --git a/chromium/media/audio/audio_manager_base.cc b/chromium/media/audio/audio_manager_base.cc
index 5b1f4b3690a..f7b590ae724 100644
--- a/chromium/media/audio/audio_manager_base.cc
+++ b/chromium/media/audio/audio_manager_base.cc
@@ -8,12 +8,11 @@
#include "base/bind_helpers.h"
#include "base/command_line.h"
#include "base/message_loop/message_loop_proxy.h"
-#include "base/threading/thread.h"
+#include "base/strings/string_number_conversions.h"
#include "build/build_config.h"
#include "media/audio/audio_output_dispatcher_impl.h"
#include "media/audio/audio_output_proxy.h"
#include "media/audio/audio_output_resampler.h"
-#include "media/audio/audio_util.h"
#include "media/audio/fake_audio_input_stream.h"
#include "media/audio/fake_audio_output_stream.h"
#include "media/base/media_switches.h"
@@ -78,7 +77,7 @@ class AudioManagerBase::CompareByParams {
const DispatcherParams* dispatcher_;
};
-AudioManagerBase::AudioManagerBase()
+AudioManagerBase::AudioManagerBase(AudioLogFactory* audio_log_factory)
: max_num_output_streams_(kDefaultMaxOutputStreams),
max_num_input_streams_(kDefaultMaxInputStreams),
num_output_streams_(0),
@@ -87,9 +86,10 @@ AudioManagerBase::AudioManagerBase()
// block the UI thread when swapping devices.
output_listeners_(
ObserverList<AudioDeviceListener>::NOTIFY_EXISTING_ONLY),
- audio_thread_(new base::Thread("AudioThread")) {
+ audio_thread_("AudioThread"),
+ audio_log_factory_(audio_log_factory) {
#if defined(OS_WIN)
- audio_thread_->init_com_with_mta(true);
+ audio_thread_.init_com_with_mta(true);
#elif defined(OS_MACOSX)
// CoreAudio calls must occur on the main thread of the process, which in our
// case is sadly the browser UI thread. Failure to execute calls on the right
@@ -104,8 +104,8 @@ AudioManagerBase::AudioManagerBase()
}
#endif
- CHECK(audio_thread_->Start());
- message_loop_ = audio_thread_->message_loop_proxy();
+ CHECK(audio_thread_.Start());
+ message_loop_ = audio_thread_.message_loop_proxy();
}
AudioManagerBase::~AudioManagerBase() {
@@ -114,15 +114,15 @@ AudioManagerBase::~AudioManagerBase() {
// stopping the thread, resulting an unexpected behavior.
// This way we make sure activities of the audio streams are all stopped
// before we destroy them.
- CHECK(!audio_thread_.get());
+ CHECK(!audio_thread_.IsRunning());
// All the output streams should have been deleted.
DCHECK_EQ(0, num_output_streams_);
// All the input streams should have been deleted.
DCHECK_EQ(0, num_input_streams_);
}
-string16 AudioManagerBase::GetAudioInputDeviceModel() {
- return string16();
+base::string16 AudioManagerBase::GetAudioInputDeviceModel() {
+ return base::string16();
}
scoped_refptr<base::MessageLoopProxy> AudioManagerBase::GetMessageLoop() {
@@ -131,10 +131,10 @@ scoped_refptr<base::MessageLoopProxy> AudioManagerBase::GetMessageLoop() {
scoped_refptr<base::MessageLoopProxy> AudioManagerBase::GetWorkerLoop() {
// Lazily start the worker thread.
- if (!audio_thread_->IsRunning())
- CHECK(audio_thread_->Start());
+ if (!audio_thread_.IsRunning())
+ CHECK(audio_thread_.Start());
- return audio_thread_->message_loop_proxy();
+ return audio_thread_.message_loop_proxy();
}
AudioOutputStream* AudioManagerBase::MakeAudioOutputStream(
@@ -330,17 +330,6 @@ void AudioManagerBase::ReleaseInputStream(AudioInputStream* stream) {
}
void AudioManagerBase::Shutdown() {
- // To avoid running into deadlocks while we stop the thread, shut it down
- // via a local variable while not holding the audio thread lock.
- scoped_ptr<base::Thread> audio_thread;
- {
- base::AutoLock lock(audio_thread_lock_);
- audio_thread_.swap(audio_thread);
- }
-
- if (!audio_thread)
- return;
-
// Only true when we're sharing the UI message loop with the browser. The UI
// loop is no longer running at this time and browser destruction is imminent.
if (message_loop_->BelongsToCurrentThread()) {
@@ -351,27 +340,24 @@ void AudioManagerBase::Shutdown() {
}
// Stop() will wait for any posted messages to be processed first.
- audio_thread->Stop();
+ audio_thread_.Stop();
}
void AudioManagerBase::ShutdownOnAudioThread() {
- // This should always be running on the audio thread, but since we've cleared
- // the audio_thread_ member pointer when we get here, we can't verify exactly
- // what thread we're running on. The method is not public though and only
- // called from one place, so we'll leave it at that.
+ DCHECK(message_loop_->BelongsToCurrentThread());
+
AudioOutputDispatchers::iterator it = output_dispatchers_.begin();
for (; it != output_dispatchers_.end(); ++it) {
scoped_refptr<AudioOutputDispatcher>& dispatcher = (*it)->dispatcher;
- if (dispatcher.get()) {
- dispatcher->Shutdown();
- // All AudioOutputProxies must have been freed before Shutdown is called.
- // If they still exist, things will go bad. They have direct pointers to
- // both physical audio stream objects that belong to the dispatcher as
- // well as the message loop of the audio thread that will soon go away.
- // So, better crash now than later.
- DCHECK(dispatcher->HasOneRef()) << "AudioOutputProxies are still alive";
- dispatcher = NULL;
- }
+ dispatcher->Shutdown();
+
+ // All AudioOutputProxies must have been freed before Shutdown is called.
+ // If they still exist, things will go bad. They have direct pointers to
+ // both physical audio stream objects that belong to the dispatcher as
+ // well as the message loop of the audio thread that will soon go away.
+ // So, better crash now than later.
+ DCHECK(dispatcher->HasOneRef()) << "AudioOutputProxies are still alive";
+ dispatcher = NULL;
}
output_dispatchers_.clear();
@@ -419,8 +405,51 @@ std::string AudioManagerBase::GetAssociatedOutputDeviceID(
}
std::string AudioManagerBase::GetDefaultOutputDeviceID() {
- NOTIMPLEMENTED();
return "";
}
+int AudioManagerBase::GetUserBufferSize() {
+ const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
+ int buffer_size = 0;
+ std::string buffer_size_str(cmd_line->GetSwitchValueASCII(
+ switches::kAudioBufferSize));
+ if (base::StringToInt(buffer_size_str, &buffer_size) && buffer_size > 0)
+ return buffer_size;
+
+ return 0;
+}
+
+scoped_ptr<AudioLog> AudioManagerBase::CreateAudioLog(
+ AudioLogFactory::AudioComponent component) {
+ return audio_log_factory_->CreateAudioLog(component);
+}
+
+void AudioManagerBase::FixWedgedAudio() {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+#if defined(OS_MACOSX)
+ // Through trial and error, we've found that one way to restore audio after a
+ // hang is to close all outstanding audio streams. Once all streams have been
+ // closed, new streams appear to work correctly.
+ //
+ // In Chrome terms, this means we need to ask all AudioOutputDispatchers to
+ // close all Open()'d streams. Once all streams across all dispatchers have
+ // been closed, we ask for all previously Start()'d streams to be recreated
+ // using the same AudioSourceCallback they had before.
+ //
+ // Since this operation takes place on the audio thread we can be sure that no
+ // other state-changing stream operations will take place while the fix is in
+ // progress.
+ //
+ // See http://crbug.com/160920 for additional details.
+ for (AudioOutputDispatchers::iterator it = output_dispatchers_.begin();
+ it != output_dispatchers_.end(); ++it) {
+ (*it)->dispatcher->CloseStreamsForWedgeFix();
+ }
+ for (AudioOutputDispatchers::iterator it = output_dispatchers_.begin();
+ it != output_dispatchers_.end(); ++it) {
+ (*it)->dispatcher->RestartStreamsForWedgeFix();
+ }
+#endif
+}
+
} // namespace media
diff --git a/chromium/media/audio/audio_manager_base.h b/chromium/media/audio/audio_manager_base.h
index cdf7d3a76ae..09b021a0d2b 100644
--- a/chromium/media/audio/audio_manager_base.h
+++ b/chromium/media/audio/audio_manager_base.h
@@ -12,7 +12,7 @@
#include "base/memory/scoped_ptr.h"
#include "base/memory/scoped_vector.h"
#include "base/observer_list.h"
-#include "base/synchronization/lock.h"
+#include "base/threading/thread.h"
#include "media/audio/audio_manager.h"
#include "media/audio/audio_output_dispatcher.h"
@@ -21,10 +21,6 @@
#include "base/win/scoped_com_initializer.h"
#endif
-namespace base {
-class Thread;
-}
-
namespace media {
class AudioOutputDispatcher;
@@ -55,7 +51,7 @@ class MEDIA_EXPORT AudioManagerBase : public AudioManager {
virtual scoped_refptr<base::MessageLoopProxy> GetMessageLoop() OVERRIDE;
virtual scoped_refptr<base::MessageLoopProxy> GetWorkerLoop() OVERRIDE;
- virtual string16 GetAudioInputDeviceModel() OVERRIDE;
+ virtual base::string16 GetAudioInputDeviceModel() OVERRIDE;
virtual void ShowAudioInputSettings() OVERRIDE;
@@ -119,9 +115,13 @@ class MEDIA_EXPORT AudioManagerBase : public AudioManager {
virtual std::string GetAssociatedOutputDeviceID(
const std::string& input_device_id) OVERRIDE;
- protected:
- AudioManagerBase();
+ virtual scoped_ptr<AudioLog> CreateAudioLog(
+ AudioLogFactory::AudioComponent component) OVERRIDE;
+
+ virtual void FixWedgedAudio() OVERRIDE;
+ protected:
+ AudioManagerBase(AudioLogFactory* audio_log_factory);
// Shuts down the audio thread and releases all the audio output dispatchers
// on the audio thread. All audio streams should be freed before Shutdown()
@@ -136,6 +136,10 @@ class MEDIA_EXPORT AudioManagerBase : public AudioManager {
// thread.
void NotifyAllOutputDeviceChangeListeners();
+ // Returns user buffer size as specified on the command line or 0 if no buffer
+ // size has been specified.
+ int GetUserBufferSize();
+
// Returns the preferred hardware audio output parameters for opening output
// streams. If the users inject a valid |input_params|, each AudioManager
// will decide if they should return the values from |input_params| or the
@@ -181,8 +185,7 @@ class MEDIA_EXPORT AudioManagerBase : public AudioManager {
ObserverList<AudioDeviceListener> output_listeners_;
// Thread used to interact with audio streams created by this audio manager.
- scoped_ptr<base::Thread> audio_thread_;
- mutable base::Lock audio_thread_lock_;
+ base::Thread audio_thread_;
// The message loop of the audio thread this object runs on. Used for internal
// tasks which run on the audio thread even after Shutdown() has been started
@@ -193,6 +196,9 @@ class MEDIA_EXPORT AudioManagerBase : public AudioManager {
// from the audio thread (no locking).
AudioOutputDispatchers output_dispatchers_;
+ // Proxy for creating AudioLog objects.
+ AudioLogFactory* const audio_log_factory_;
+
DISALLOW_COPY_AND_ASSIGN(AudioManagerBase);
};
diff --git a/chromium/media/audio/audio_manager_unittest.cc b/chromium/media/audio/audio_manager_unittest.cc
index 4747c2e2996..8c6cc10b423 100644
--- a/chromium/media/audio/audio_manager_unittest.cc
+++ b/chromium/media/audio/audio_manager_unittest.cc
@@ -7,11 +7,12 @@
#include "base/memory/scoped_ptr.h"
#include "media/audio/audio_manager.h"
#include "media/audio/audio_manager_base.h"
+#include "media/audio/fake_audio_log_factory.h"
#include "testing/gtest/include/gtest/gtest.h"
-#if defined(OS_LINUX)
-#include "media/audio/linux/audio_manager_linux.h"
-#endif // defined(OS_LINUX)
+#if defined(USE_ALSA)
+#include "media/audio/alsa/audio_manager_alsa.h"
+#endif // defined(USE_ALSA)
#if defined(OS_WIN)
#include "base/win/scoped_com_initializer.h"
@@ -31,7 +32,7 @@ class AudioManagerTest
: public ::testing::Test {
protected:
AudioManagerTest()
- : audio_manager_(AudioManager::Create())
+ : audio_manager_(AudioManager::CreateForTesting())
#if defined(OS_WIN)
, com_init_(base::win::ScopedCOMInitializer::kMTA)
#endif
@@ -111,6 +112,17 @@ class AudioManagerTest
return audio_manager_->HasAudioOutputDevices();
}
+#if defined(USE_ALSA) || defined(USE_PULSEAUDIO)
+ template <class T>
+ void CreateAudioManagerForTesting() {
+ // Only one AudioManager may exist at a time, so destroy the one we're
+ // currently holding before creating a new one.
+ audio_manager_.reset();
+ audio_manager_.reset(T::Create(&fake_audio_log_factory_));
+ }
+#endif
+
+ FakeAudioLogFactory fake_audio_log_factory_;
scoped_ptr<AudioManager> audio_manager_;
#if defined(OS_WIN)
@@ -254,7 +266,7 @@ TEST_F(AudioManagerTest, EnumerateInputDevicesPulseaudio) {
if (!CanRunInputTest())
return;
- audio_manager_.reset(AudioManagerPulse::Create());
+ CreateAudioManagerForTesting<AudioManagerPulse>();
if (audio_manager_.get()) {
AudioDeviceNames device_names;
audio_manager_->GetAudioInputDeviceNames(&device_names);
@@ -268,7 +280,7 @@ TEST_F(AudioManagerTest, EnumerateOutputDevicesPulseaudio) {
if (!CanRunOutputTest())
return;
- audio_manager_.reset(AudioManagerPulse::Create());
+ CreateAudioManagerForTesting<AudioManagerPulse>();
if (audio_manager_.get()) {
AudioDeviceNames device_names;
audio_manager_->GetAudioOutputDeviceNames(&device_names);
@@ -288,8 +300,8 @@ TEST_F(AudioManagerTest, EnumerateInputDevicesAlsa) {
if (!CanRunInputTest())
return;
- VLOG(2) << "Testing AudioManagerLinux.";
- audio_manager_.reset(new AudioManagerLinux());
+ VLOG(2) << "Testing AudioManagerAlsa.";
+ CreateAudioManagerForTesting<AudioManagerAlsa>();
AudioDeviceNames device_names;
audio_manager_->GetAudioInputDeviceNames(&device_names);
CheckDeviceNames(device_names);
@@ -299,8 +311,8 @@ TEST_F(AudioManagerTest, EnumerateOutputDevicesAlsa) {
if (!CanRunOutputTest())
return;
- VLOG(2) << "Testing AudioManagerLinux.";
- audio_manager_.reset(new AudioManagerLinux());
+ VLOG(2) << "Testing AudioManagerAlsa.";
+ CreateAudioManagerForTesting<AudioManagerAlsa>();
AudioDeviceNames device_names;
audio_manager_->GetAudioOutputDeviceNames(&device_names);
CheckDeviceNames(device_names);
diff --git a/chromium/media/audio/audio_output_controller.cc b/chromium/media/audio/audio_output_controller.cc
index 649612cd4f6..92f9f25de53 100644
--- a/chromium/media/audio/audio_output_controller.cc
+++ b/chromium/media/audio/audio_output_controller.cc
@@ -8,11 +8,10 @@
#include "base/debug/trace_event.h"
#include "base/message_loop/message_loop.h"
#include "base/metrics/histogram.h"
+#include "base/task_runner_util.h"
#include "base/threading/platform_thread.h"
#include "base/time/time.h"
#include "build/build_config.h"
-#include "media/audio/audio_util.h"
-#include "media/audio/shared_memory_util.h"
#include "media/base/scoped_histogram_timer.h"
using base::Time;
@@ -58,7 +57,7 @@ AudioOutputController::AudioOutputController(
params.sample_rate(),
TimeDelta::FromMilliseconds(kPowerMeasurementTimeConstantMillis)),
#endif
- number_polling_attempts_left_(0) {
+ on_more_io_data_called_(0) {
DCHECK(audio_manager);
DCHECK(handler_);
DCHECK(sync_reader_);
@@ -112,9 +111,28 @@ void AudioOutputController::SetVolume(double volume) {
&AudioOutputController::DoSetVolume, this, volume));
}
+void AudioOutputController::GetOutputDeviceId(
+ base::Callback<void(const std::string&)> callback) const {
+ base::PostTaskAndReplyWithResult(
+ message_loop_.get(),
+ FROM_HERE,
+ base::Bind(&AudioOutputController::DoGetOutputDeviceId, this),
+ callback);
+}
+
+void AudioOutputController::SwitchOutputDevice(
+ const std::string& output_device_id, const base::Closure& callback) {
+ message_loop_->PostTaskAndReply(
+ FROM_HERE,
+ base::Bind(&AudioOutputController::DoSwitchOutputDevice, this,
+ output_device_id),
+ callback);
+}
+
void AudioOutputController::DoCreate(bool is_for_device_change) {
DCHECK(message_loop_->BelongsToCurrentThread());
SCOPED_UMA_HISTOGRAM_TIMER("Media.AudioOutputController.CreateTime");
+ TRACE_EVENT0("audio", "AudioOutputController::DoCreate");
// Close() can be called before DoCreate() is executed.
if (state_ == kClosed)
@@ -159,6 +177,7 @@ void AudioOutputController::DoCreate(bool is_for_device_change) {
void AudioOutputController::DoPlay() {
DCHECK(message_loop_->BelongsToCurrentThread());
SCOPED_UMA_HISTOGRAM_TIMER("Media.AudioOutputController.PlayTime");
+ TRACE_EVENT0("audio", "AudioOutputController::DoPlay");
// We can start from created or paused state.
if (state_ != kCreated && state_ != kPaused)
@@ -179,10 +198,26 @@ void AudioOutputController::DoPlay() {
power_poll_callback_.callback().Run();
#endif
- // We start the AudioOutputStream lazily.
+ on_more_io_data_called_ = 0;
AllowEntryToOnMoreIOData();
stream_->Start(this);
+ // For UMA tracking purposes, start the wedge detection timer. This allows us
+ // to record statistics about the number of wedged playbacks in the field.
+ //
+ // WedgeCheck() will look to see if |on_more_io_data_called_| is true after
+ // the timeout expires. Care must be taken to ensure the wedge check delay is
+ // large enough that the value isn't queried while OnMoreDataIO() is setting
+ // it.
+ //
+ // Timer self-manages its lifetime and WedgeCheck() will only record the UMA
+ // statistic if state is still kPlaying. Additional Start() calls will
+ // invalidate the previous timer.
+ wedge_timer_.reset(new base::OneShotTimer<AudioOutputController>());
+ wedge_timer_->Start(
+ FROM_HERE, TimeDelta::FromSeconds(5), this,
+ &AudioOutputController::WedgeCheck);
+
handler_->OnPlaying();
}
@@ -202,6 +237,7 @@ void AudioOutputController::StopStream() {
DCHECK(message_loop_->BelongsToCurrentThread());
if (state_ == kPlaying) {
+ wedge_timer_.reset();
stream_->Stop();
DisallowEntryToOnMoreIOData();
@@ -216,14 +252,17 @@ void AudioOutputController::StopStream() {
void AudioOutputController::DoPause() {
DCHECK(message_loop_->BelongsToCurrentThread());
SCOPED_UMA_HISTOGRAM_TIMER("Media.AudioOutputController.PauseTime");
+ TRACE_EVENT0("audio", "AudioOutputController::DoPause");
StopStream();
if (state_ != kPaused)
return;
- // Send a special pause mark to the low-latency audio thread.
- sync_reader_->UpdatePendingBytes(kPauseMark);
+ // Let the renderer know we've stopped. Necessary to let PPAPI clients know
+ // audio has been shutdown. TODO(dalecurtis): This stinks. PPAPI should have
+ // a better way to know when it should exit PPB_Audio_Shared::Run().
+ sync_reader_->UpdatePendingBytes(-1);
#if defined(AUDIO_POWER_MONITORING)
// Paused means silence follows.
@@ -236,6 +275,7 @@ void AudioOutputController::DoPause() {
void AudioOutputController::DoClose() {
DCHECK(message_loop_->BelongsToCurrentThread());
SCOPED_UMA_HISTOGRAM_TIMER("Media.AudioOutputController.CloseTime");
+ TRACE_EVENT0("audio", "AudioOutputController::DoClose");
if (state_ != kClosed) {
DoStopCloseAndClearStream();
@@ -262,6 +302,31 @@ void AudioOutputController::DoSetVolume(double volume) {
}
}
+std::string AudioOutputController::DoGetOutputDeviceId() const {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+ return output_device_id_;
+}
+
+void AudioOutputController::DoSwitchOutputDevice(
+ const std::string& output_device_id) {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+
+ if (state_ == kClosed)
+ return;
+
+ if (output_device_id == output_device_id_)
+ return;
+
+ output_device_id_ = output_device_id;
+
+ // If output is currently diverted, we must not call OnDeviceChange
+ // since it would break the diverted setup. Once diversion is
+ // finished using StopDiverting() the output will switch to the new
+ // device ID.
+ if (stream_ != diverting_to_stream_)
+ OnDeviceChange();
+}
+
void AudioOutputController::DoReportError() {
DCHECK(message_loop_->BelongsToCurrentThread());
if (state_ != kClosed)
@@ -279,26 +344,16 @@ int AudioOutputController::OnMoreIOData(AudioBus* source,
DisallowEntryToOnMoreIOData();
TRACE_EVENT0("audio", "AudioOutputController::OnMoreIOData");
- // The OS level audio APIs on Linux and Windows all have problems requesting
- // data on a fixed interval. Sometimes they will issue calls back to back
- // which can cause glitching, so wait until the renderer is ready.
- //
- // We also need to wait when diverting since the virtual stream will call this
- // multiple times without waiting.
- //
- // NEVER wait on OSX unless a virtual stream is connected, otherwise we can
- // end up hanging the entire OS.
- //
- // See many bugs for context behind this decision: http://crbug.com/170498,
- // http://crbug.com/171651, http://crbug.com/174985, and more.
-#if defined(OS_WIN) || defined(OS_LINUX)
- const bool kShouldBlock = true;
-#else
- const bool kShouldBlock = diverting_to_stream_ != NULL;
-#endif
+ // Indicate that we haven't wedged (at least not indefinitely, WedgeCheck()
+ // may have already fired if OnMoreIOData() took an abnormal amount of time).
+ // Since this thread is the only writer of |on_more_io_data_called_| once the
+ // thread starts, its safe to compare and then increment.
+ if (base::AtomicRefCountIsZero(&on_more_io_data_called_))
+ base::AtomicRefCountInc(&on_more_io_data_called_);
- const int frames = sync_reader_->Read(kShouldBlock, source, dest);
- DCHECK_LE(0, frames);
+ sync_reader_->Read(source, dest);
+
+ const int frames = dest->frames();
sync_reader_->UpdatePendingBytes(
buffers_state.total_bytes() + frames * params_.GetBytesPerFrame());
@@ -339,6 +394,7 @@ void AudioOutputController::DoStopCloseAndClearStream() {
void AudioOutputController::OnDeviceChange() {
DCHECK(message_loop_->BelongsToCurrentThread());
SCOPED_UMA_HISTOGRAM_TIMER("Media.AudioOutputController.DeviceChangeTime");
+ TRACE_EVENT0("audio", "AudioOutputController::OnDeviceChange");
// TODO(dalecurtis): Notify the renderer side that a device change has
// occurred. Currently querying the hardware information here will lead to
@@ -417,4 +473,21 @@ void AudioOutputController::DisallowEntryToOnMoreIOData() {
DCHECK(is_zero);
}
+void AudioOutputController::WedgeCheck() {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+
+ // If we should be playing and we haven't, that's a wedge.
+ if (state_ == kPlaying) {
+ const bool playback_success =
+ base::AtomicRefCountIsOne(&on_more_io_data_called_);
+
+ UMA_HISTOGRAM_BOOLEAN(
+ "Media.AudioOutputControllerPlaybackStartupSuccess", playback_success);
+
+ // Let the AudioManager try and fix it.
+ if (!playback_success)
+ audio_manager_->FixWedgedAudio();
+ }
+}
+
} // namespace media
diff --git a/chromium/media/audio/audio_output_controller.h b/chromium/media/audio/audio_output_controller.h
index 615c6a5e6c6..d16ce9e79b6 100644
--- a/chromium/media/audio/audio_output_controller.h
+++ b/chromium/media/audio/audio_output_controller.h
@@ -9,6 +9,7 @@
#include "base/callback.h"
#include "base/cancelable_callback.h"
#include "base/memory/ref_counted.h"
+#include "base/timer/timer.h"
#include "media/audio/audio_io.h"
#include "media/audio/audio_manager.h"
#include "media/audio/audio_power_monitor.h"
@@ -91,11 +92,10 @@ class MEDIA_EXPORT AudioOutputController
// prepare more data and perform synchronization.
virtual void UpdatePendingBytes(uint32 bytes) = 0;
- // Attempt to completely fill |dest|, return the actual number of frames
- // that could be read. |source| may optionally be provided for input data.
- // If |block| is specified, the Read() will block until data is available
- // or a timeout is reached.
- virtual int Read(bool block, const AudioBus* source, AudioBus* dest) = 0;
+ // Attempts to completely fill |dest|, zeroing |dest| if the request can not
+ // be fulfilled (due to timeout). |source| may optionally be provided for
+ // input data.
+ virtual void Read(const AudioBus* source, AudioBus* dest) = 0;
// Close this synchronous reader.
virtual void Close() = 0;
@@ -135,6 +135,23 @@ class MEDIA_EXPORT AudioOutputController
// Sets the volume of the audio output stream.
void SetVolume(double volume);
+ // Calls |callback| (on the caller's thread) with the current output
+ // device ID.
+ void GetOutputDeviceId(
+ base::Callback<void(const std::string&)> callback) const;
+
+ // Changes which output device to use. If desired, you can provide a
+ // callback that will be notified (on the thread you called from)
+ // when the function has completed execution.
+ //
+ // Changing the output device causes the controller to go through
+ // the same state transition back to the current state as a call to
+ // OnDeviceChange (unless it is currently diverting, see
+ // Start/StopDiverting below, in which case the state transition
+ // will happen when StopDiverting is called).
+ void SwitchOutputDevice(const std::string& output_device_id,
+ const base::Closure& callback);
+
// AudioSourceCallback implementation.
virtual int OnMoreData(AudioBus* dest,
AudioBuffersState buffers_state) OVERRIDE;
@@ -185,6 +202,8 @@ class MEDIA_EXPORT AudioOutputController
void DoPause();
void DoClose();
void DoSetVolume(double volume);
+ std::string DoGetOutputDeviceId() const;
+ void DoSwitchOutputDevice(const std::string& output_device_id);
void DoReportError();
void DoStartDiverting(AudioOutputStream* to_stream);
void DoStopDiverting();
@@ -204,13 +223,16 @@ class MEDIA_EXPORT AudioOutputController
void AllowEntryToOnMoreIOData();
void DisallowEntryToOnMoreIOData();
+ // Checks if a stream was started successfully but never calls OnMoreIOData().
+ void WedgeCheck();
+
AudioManager* const audio_manager_;
const AudioParameters params_;
EventHandler* const handler_;
// Specifies the device id of the output device to open or empty for the
// default output device.
- const std::string output_device_id_;
+ std::string output_device_id_;
// Used by the unified IO to open the correct input device.
const std::string input_device_id_;
@@ -249,9 +271,9 @@ class MEDIA_EXPORT AudioOutputController
base::CancelableClosure power_poll_callback_;
#endif
- // When starting stream we wait for data to become available.
- // Number of times left.
- int number_polling_attempts_left_;
+ // Flags when we've asked for a stream to start but it never did.
+ base::AtomicRefCount on_more_io_data_called_;
+ scoped_ptr<base::OneShotTimer<AudioOutputController> > wedge_timer_;
DISALLOW_COPY_AND_ASSIGN(AudioOutputController);
};
diff --git a/chromium/media/audio/audio_output_controller_unittest.cc b/chromium/media/audio/audio_output_controller_unittest.cc
index a7118e17a30..457265ec970 100644
--- a/chromium/media/audio/audio_output_controller_unittest.cc
+++ b/chromium/media/audio/audio_output_controller_unittest.cc
@@ -10,6 +10,7 @@
#include "base/memory/scoped_ptr.h"
#include "base/message_loop/message_loop.h"
#include "base/synchronization/waitable_event.h"
+#include "media/audio/audio_manager_base.h"
#include "media/audio/audio_output_controller.h"
#include "media/audio/audio_parameters.h"
#include "media/base/audio_bus.h"
@@ -53,7 +54,7 @@ class MockAudioOutputControllerSyncReader
MockAudioOutputControllerSyncReader() {}
MOCK_METHOD1(UpdatePendingBytes, void(uint32 bytes));
- MOCK_METHOD3(Read, int(bool block, const AudioBus* source, AudioBus* dest));
+ MOCK_METHOD2(Read, void(const AudioBus* source, AudioBus* dest));
MOCK_METHOD0(Close, void());
private:
@@ -83,16 +84,16 @@ ACTION_P(SignalEvent, event) {
static const float kBufferNonZeroData = 1.0f;
ACTION(PopulateBuffer) {
- arg2->Zero();
+ arg1->Zero();
// Note: To confirm the buffer will be populated in these tests, it's
// sufficient that only the first float in channel 0 is set to the value.
- arg2->channel(0)[0] = kBufferNonZeroData;
+ arg1->channel(0)[0] = kBufferNonZeroData;
}
class AudioOutputControllerTest : public testing::Test {
public:
AudioOutputControllerTest()
- : audio_manager_(AudioManager::Create()),
+ : audio_manager_(AudioManager::CreateForTesting()),
create_event_(false, false),
play_event_(false, false),
read_event_(false, false),
@@ -141,10 +142,9 @@ class AudioOutputControllerTest : public testing::Test {
// sent from the render process.
EXPECT_CALL(mock_sync_reader_, UpdatePendingBytes(_))
.Times(AtLeast(1));
- EXPECT_CALL(mock_sync_reader_, Read(_, _, _))
+ EXPECT_CALL(mock_sync_reader_, Read(_, _))
.WillRepeatedly(DoAll(PopulateBuffer(),
- SignalEvent(&read_event_),
- Return(params_.frames_per_buffer())));
+ SignalEvent(&read_event_)));
controller_->Play();
}
@@ -216,6 +216,19 @@ class AudioOutputControllerTest : public testing::Test {
controller_->StopDiverting();
}
+ void SwitchDevice(bool diverting) {
+ if (!diverting) {
+ // Expect the current stream to close and a new stream to start
+ // playing if not diverting. When diverting, nothing happens
+ // until diverting is stopped.
+ EXPECT_CALL(mock_event_handler_, OnPlaying())
+ .WillOnce(SignalEvent(&play_event_));
+ }
+
+ controller_->SwitchOutputDevice(AudioManagerBase::kDefaultDeviceName,
+ base::Bind(&base::DoNothing));
+ }
+
void Close() {
EXPECT_CALL(mock_sync_reader_, Close());
@@ -314,6 +327,18 @@ TEST_F(AudioOutputControllerTest, PlayDeviceChangeClose) {
Close();
}
+TEST_F(AudioOutputControllerTest, PlaySwitchDeviceClose) {
+ Create(kSamplesPerPacket);
+ WaitForCreate();
+ Play();
+ WaitForPlay();
+ WaitForReads();
+ SwitchDevice(false);
+ WaitForPlay();
+ WaitForReads();
+ Close();
+}
+
TEST_F(AudioOutputControllerTest, PlayDivertRevertClose) {
Create(kSamplesPerPacket);
WaitForCreate();
@@ -329,6 +354,22 @@ TEST_F(AudioOutputControllerTest, PlayDivertRevertClose) {
Close();
}
+TEST_F(AudioOutputControllerTest, PlayDivertSwitchDeviceRevertClose) {
+ Create(kSamplesPerPacket);
+ WaitForCreate();
+ Play();
+ WaitForPlay();
+ WaitForReads();
+ DivertWhilePlaying();
+ WaitForPlay();
+ SwitchDevice(true);
+ ReadDivertedAudioData();
+ RevertWhilePlaying();
+ WaitForPlay();
+ WaitForReads();
+ Close();
+}
+
TEST_F(AudioOutputControllerTest, PlayDivertRevertDivertRevertClose) {
Create(kSamplesPerPacket);
WaitForCreate();
diff --git a/chromium/media/audio/audio_output_device.cc b/chromium/media/audio/audio_output_device.cc
index 0c406cab0d6..1f9efc185bd 100644
--- a/chromium/media/audio/audio_output_device.cc
+++ b/chromium/media/audio/audio_output_device.cc
@@ -10,8 +10,6 @@
#include "base/threading/thread_restrictions.h"
#include "base/time/time.h"
#include "media/audio/audio_output_controller.h"
-#include "media/audio/audio_util.h"
-#include "media/audio/shared_memory_util.h"
#include "media/base/limits.h"
namespace media {
@@ -245,8 +243,8 @@ void AudioOutputDevice::OnStreamCreated(
DCHECK(audio_thread_.IsStopped());
audio_callback_.reset(new AudioOutputDevice::AudioThreadCallback(
audio_parameters_, handle, length, callback_));
- audio_thread_.Start(audio_callback_.get(), socket_handle,
- "AudioOutputDevice");
+ audio_thread_.Start(
+ audio_callback_.get(), socket_handle, "AudioOutputDevice", true);
state_ = PAUSED;
// We handle the case where Play() and/or Pause() may have been called
@@ -273,26 +271,21 @@ AudioOutputDevice::AudioThreadCallback::AudioThreadCallback(
base::SharedMemoryHandle memory,
int memory_length,
AudioRendererSink::RenderCallback* render_callback)
- : AudioDeviceThread::Callback(audio_parameters,
- memory,
- memory_length,
- 1),
- render_callback_(render_callback) {
-}
+ : AudioDeviceThread::Callback(audio_parameters, memory, memory_length, 1),
+ render_callback_(render_callback) {}
AudioOutputDevice::AudioThreadCallback::~AudioThreadCallback() {
}
void AudioOutputDevice::AudioThreadCallback::MapSharedMemory() {
CHECK_EQ(total_segments_, 1);
- CHECK(shared_memory_.Map(TotalSharedMemorySizeInBytes(memory_length_)));
+ CHECK(shared_memory_.Map(memory_length_));
// Calculate output and input memory size.
int output_memory_size = AudioBus::CalculateMemorySize(audio_parameters_);
int input_channels = audio_parameters_.input_channels();
int frames = audio_parameters_.frames_per_buffer();
- int input_memory_size =
- AudioBus::CalculateMemorySize(input_channels, frames);
+ int input_memory_size = AudioBus::CalculateMemorySize(input_channels, frames);
int io_size = output_memory_size + input_memory_size;
@@ -305,21 +298,17 @@ void AudioOutputDevice::AudioThreadCallback::MapSharedMemory() {
// The input data is after the output data.
char* input_data =
static_cast<char*>(shared_memory_.memory()) + output_memory_size;
- input_bus_ =
- AudioBus::WrapMemory(input_channels, frames, input_data);
+ input_bus_ = AudioBus::WrapMemory(input_channels, frames, input_data);
}
}
// Called whenever we receive notifications about pending data.
void AudioOutputDevice::AudioThreadCallback::Process(int pending_data) {
- if (pending_data == kPauseMark) {
- memset(shared_memory_.memory(), 0, memory_length_);
- SetActualDataSizeInBytes(&shared_memory_, memory_length_, 0);
+ // Negative |pending_data| indicates the browser side stream has stopped.
+ if (pending_data < 0)
return;
- }
- // Convert the number of pending bytes in the render buffer
- // into milliseconds.
+ // Convert the number of pending bytes in the render buffer into milliseconds.
int audio_delay_milliseconds = pending_data / bytes_per_ms_;
TRACE_EVENT0("audio", "AudioOutputDevice::FireRenderCallback");
@@ -328,25 +317,12 @@ void AudioOutputDevice::AudioThreadCallback::Process(int pending_data) {
// |output_bus_| is wrapping the shared memory the Render() call is writing
// directly into the shared memory.
int input_channels = audio_parameters_.input_channels();
- size_t num_frames = audio_parameters_.frames_per_buffer();
-
- if (input_bus_.get() && input_channels > 0) {
- render_callback_->RenderIO(input_bus_.get(),
- output_bus_.get(),
- audio_delay_milliseconds);
+ if (input_bus_ && input_channels > 0) {
+ render_callback_->RenderIO(
+ input_bus_.get(), output_bus_.get(), audio_delay_milliseconds);
} else {
- num_frames = render_callback_->Render(output_bus_.get(),
- audio_delay_milliseconds);
+ render_callback_->Render(output_bus_.get(), audio_delay_milliseconds);
}
-
- // Let the host know we are done.
- // TODO(dalecurtis): Technically this is not always correct. Due to channel
- // padding for alignment, there may be more data available than this. We're
- // relying on AudioSyncReader::Read() to parse this with that in mind. Rename
- // these methods to Set/GetActualFrameCount().
- SetActualDataSizeInBytes(
- &shared_memory_, memory_length_,
- num_frames * sizeof(*output_bus_->channel(0)) * output_bus_->channels());
}
} // namespace media.
diff --git a/chromium/media/audio/audio_output_device_unittest.cc b/chromium/media/audio/audio_output_device_unittest.cc
index 96da77d7404..7aca2627745 100644
--- a/chromium/media/audio/audio_output_device_unittest.cc
+++ b/chromium/media/audio/audio_output_device_unittest.cc
@@ -12,7 +12,6 @@
#include "base/test/test_timeouts.h"
#include "media/audio/audio_output_device.h"
#include "media/audio/sample_rates.h"
-#include "media/audio/shared_memory_util.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gmock_mutant.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -124,8 +123,6 @@ class AudioOutputDeviceTest
DISALLOW_COPY_AND_ASSIGN(AudioOutputDeviceTest);
};
-static const int kStreamId = 123;
-
int AudioOutputDeviceTest::CalculateMemorySize() {
// Calculate output and input memory size.
int output_memory_size =
@@ -135,13 +132,7 @@ int AudioOutputDeviceTest::CalculateMemorySize() {
int input_memory_size =
AudioBus::CalculateMemorySize(input_channels_, frames);
- int io_buffer_size = output_memory_size + input_memory_size;
-
- // This is where it gets a bit hacky. The shared memory contract between
- // AudioOutputDevice and its browser side counter part includes a bit more
- // than just the audio data, so we must call TotalSharedMemorySizeInBytes()
- // to get the actual size needed to fit the audio data plus the extra data.
- return TotalSharedMemorySizeInBytes(io_buffer_size);
+ return output_memory_size + input_memory_size;
}
AudioOutputDeviceTest::AudioOutputDeviceTest()
@@ -195,7 +186,7 @@ void AudioOutputDeviceTest::CreateStream() {
&duplicated_memory_handle));
audio_device_->OnStreamCreated(duplicated_memory_handle, audio_device_socket,
- PacketSizeInBytes(kMemorySize));
+ kMemorySize);
io_loop_.RunUntilIdle();
}
diff --git a/chromium/media/audio/audio_output_dispatcher.cc b/chromium/media/audio/audio_output_dispatcher.cc
index a151c449f02..89912c07dce 100644
--- a/chromium/media/audio/audio_output_dispatcher.cc
+++ b/chromium/media/audio/audio_output_dispatcher.cc
@@ -4,7 +4,7 @@
#include "media/audio/audio_output_dispatcher.h"
-#include "base/message_loop/message_loop.h"
+#include "base/message_loop/message_loop_proxy.h"
namespace media {
@@ -14,7 +14,7 @@ AudioOutputDispatcher::AudioOutputDispatcher(
const std::string& output_device_id,
const std::string& input_device_id)
: audio_manager_(audio_manager),
- message_loop_(base::MessageLoop::current()),
+ message_loop_(audio_manager->GetMessageLoop()),
params_(params),
output_device_id_(output_device_id),
input_device_id_(input_device_id) {
@@ -24,7 +24,7 @@ AudioOutputDispatcher::AudioOutputDispatcher(
}
AudioOutputDispatcher::~AudioOutputDispatcher() {
- DCHECK_EQ(base::MessageLoop::current(), message_loop_);
+ DCHECK(message_loop_->BelongsToCurrentThread());
}
} // namespace media
diff --git a/chromium/media/audio/audio_output_dispatcher.h b/chromium/media/audio/audio_output_dispatcher.h
index 30266ed6a9a..d707aff14b6 100644
--- a/chromium/media/audio/audio_output_dispatcher.h
+++ b/chromium/media/audio/audio_output_dispatcher.h
@@ -26,7 +26,7 @@
#include "media/audio/audio_parameters.h"
namespace base {
-class MessageLoop;
+class MessageLoopProxy;
}
namespace media {
@@ -66,21 +66,26 @@ class MEDIA_EXPORT AudioOutputDispatcher
// Called on the audio thread when the AudioManager is shutting down.
virtual void Shutdown() = 0;
+ // Called by the AudioManager to restart streams when a wedge is detected. A
+ // wedge means the OS failed to request any audio after StartStream(). When a
+ // wedge is detected all streams across all dispatchers must be closed. After
+ // all streams are closed, streams are restarted. See http://crbug.com/160920
+ virtual void CloseStreamsForWedgeFix() = 0;
+ virtual void RestartStreamsForWedgeFix() = 0;
+
// Accessor to the input device id used by unified IO.
const std::string& input_device_id() const { return input_device_id_; }
protected:
friend class base::RefCountedThreadSafe<AudioOutputDispatcher>;
- friend class AudioOutputProxyTest;
-
virtual ~AudioOutputDispatcher();
// A no-reference-held pointer (we don't want circular references) back to the
// AudioManager that owns this object.
AudioManager* audio_manager_;
- base::MessageLoop* message_loop_;
- AudioParameters params_;
- const std::string output_device_id_;
+ const scoped_refptr<base::MessageLoopProxy> message_loop_;
+ const AudioParameters params_;
+ std::string output_device_id_;
const std::string input_device_id_;
private:
diff --git a/chromium/media/audio/audio_output_dispatcher_impl.cc b/chromium/media/audio/audio_output_dispatcher_impl.cc
index bcdcd65146e..5118bef71e9 100644
--- a/chromium/media/audio/audio_output_dispatcher_impl.cc
+++ b/chromium/media/audio/audio_output_dispatcher_impl.cc
@@ -12,7 +12,6 @@
#include "base/time/time.h"
#include "media/audio/audio_io.h"
#include "media/audio/audio_output_proxy.h"
-#include "media/audio/audio_util.h"
namespace media {
@@ -22,36 +21,33 @@ AudioOutputDispatcherImpl::AudioOutputDispatcherImpl(
const std::string& output_device_id,
const std::string& input_device_id,
const base::TimeDelta& close_delay)
- : AudioOutputDispatcher(audio_manager, params, output_device_id,
- input_device_id),
- pause_delay_(base::TimeDelta::FromMicroseconds(
- 2 * params.frames_per_buffer() * base::Time::kMicrosecondsPerSecond /
- static_cast<float>(params.sample_rate()))),
- paused_proxies_(0),
- weak_this_(this),
+ : AudioOutputDispatcher(audio_manager,
+ params,
+ output_device_id,
+ input_device_id),
+ idle_proxies_(0),
close_timer_(FROM_HERE,
close_delay,
this,
- &AudioOutputDispatcherImpl::ClosePendingStreams) {
-}
+ &AudioOutputDispatcherImpl::CloseAllIdleStreams),
+ audio_log_(
+ audio_manager->CreateAudioLog(AudioLogFactory::AUDIO_OUTPUT_STREAM)),
+ audio_stream_id_(0) {}
AudioOutputDispatcherImpl::~AudioOutputDispatcherImpl() {
+ DCHECK_EQ(idle_proxies_, 0u);
DCHECK(proxy_to_physical_map_.empty());
DCHECK(idle_streams_.empty());
- DCHECK(pausing_streams_.empty());
}
bool AudioOutputDispatcherImpl::OpenStream() {
- DCHECK_EQ(base::MessageLoop::current(), message_loop_);
-
- paused_proxies_++;
+ DCHECK(message_loop_->BelongsToCurrentThread());
// Ensure that there is at least one open stream.
- if (idle_streams_.empty() && !CreateAndOpenStream()) {
- paused_proxies_--;
+ if (idle_streams_.empty() && !CreateAndOpenStream())
return false;
- }
+ ++idle_proxies_;
close_timer_.Reset();
return true;
}
@@ -59,34 +55,34 @@ bool AudioOutputDispatcherImpl::OpenStream() {
bool AudioOutputDispatcherImpl::StartStream(
AudioOutputStream::AudioSourceCallback* callback,
AudioOutputProxy* stream_proxy) {
- DCHECK_EQ(base::MessageLoop::current(), message_loop_);
+ DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(proxy_to_physical_map_.find(stream_proxy) ==
+ proxy_to_physical_map_.end());
if (idle_streams_.empty() && !CreateAndOpenStream())
return false;
AudioOutputStream* physical_stream = idle_streams_.back();
- DCHECK(physical_stream);
idle_streams_.pop_back();
- DCHECK_GT(paused_proxies_, 0u);
- --paused_proxies_;
-
- close_timer_.Reset();
-
- // Schedule task to allocate streams for other proxies if we need to.
- message_loop_->PostTask(FROM_HERE, base::Bind(
- &AudioOutputDispatcherImpl::OpenTask, weak_this_.GetWeakPtr()));
+ DCHECK_GT(idle_proxies_, 0u);
+ --idle_proxies_;
double volume = 0;
stream_proxy->GetVolume(&volume);
physical_stream->SetVolume(volume);
+ const int stream_id = audio_stream_ids_[physical_stream];
+ audio_log_->OnSetVolume(stream_id, volume);
physical_stream->Start(callback);
+ audio_log_->OnStarted(stream_id);
proxy_to_physical_map_[stream_proxy] = physical_stream;
+
+ close_timer_.Reset();
return true;
}
void AudioOutputDispatcherImpl::StopStream(AudioOutputProxy* stream_proxy) {
- DCHECK_EQ(base::MessageLoop::current(), message_loop_);
+ DCHECK(message_loop_->BelongsToCurrentThread());
AudioStreamMap::iterator it = proxy_to_physical_map_.find(stream_proxy);
DCHECK(it != proxy_to_physical_map_.end());
@@ -94,81 +90,46 @@ void AudioOutputDispatcherImpl::StopStream(AudioOutputProxy* stream_proxy) {
proxy_to_physical_map_.erase(it);
physical_stream->Stop();
+ audio_log_->OnStopped(audio_stream_ids_[physical_stream]);
+ ++idle_proxies_;
+ idle_streams_.push_back(physical_stream);
- ++paused_proxies_;
-
- pausing_streams_.push_front(physical_stream);
-
- // Don't recycle stream until two buffers worth of time has elapsed.
- message_loop_->PostDelayedTask(
- FROM_HERE,
- base::Bind(&AudioOutputDispatcherImpl::StopStreamTask,
- weak_this_.GetWeakPtr()),
- pause_delay_);
+ close_timer_.Reset();
}
void AudioOutputDispatcherImpl::StreamVolumeSet(AudioOutputProxy* stream_proxy,
double volume) {
- DCHECK_EQ(base::MessageLoop::current(), message_loop_);
+ DCHECK(message_loop_->BelongsToCurrentThread());
AudioStreamMap::iterator it = proxy_to_physical_map_.find(stream_proxy);
if (it != proxy_to_physical_map_.end()) {
AudioOutputStream* physical_stream = it->second;
physical_stream->SetVolume(volume);
+ audio_log_->OnSetVolume(audio_stream_ids_[physical_stream], volume);
}
}
-void AudioOutputDispatcherImpl::StopStreamTask() {
- DCHECK_EQ(base::MessageLoop::current(), message_loop_);
-
- if (pausing_streams_.empty())
- return;
-
- AudioOutputStream* stream = pausing_streams_.back();
- pausing_streams_.pop_back();
- idle_streams_.push_back(stream);
- close_timer_.Reset();
-}
-
void AudioOutputDispatcherImpl::CloseStream(AudioOutputProxy* stream_proxy) {
- DCHECK_EQ(base::MessageLoop::current(), message_loop_);
+ DCHECK(message_loop_->BelongsToCurrentThread());
- while (!pausing_streams_.empty()) {
- idle_streams_.push_back(pausing_streams_.back());
- pausing_streams_.pop_back();
- }
-
- DCHECK_GT(paused_proxies_, 0u);
- paused_proxies_--;
+ DCHECK_GT(idle_proxies_, 0u);
+ --idle_proxies_;
- while (idle_streams_.size() > paused_proxies_) {
- idle_streams_.back()->Close();
- idle_streams_.pop_back();
- }
+ // Leave at least a single stream running until the close timer fires to help
+ // cycle time when streams are opened and closed repeatedly.
+ CloseIdleStreams(std::max(idle_proxies_, static_cast<size_t>(1)));
+ close_timer_.Reset();
}
void AudioOutputDispatcherImpl::Shutdown() {
- DCHECK_EQ(base::MessageLoop::current(), message_loop_);
-
- // Cancel any pending tasks to close paused streams or create new ones.
- weak_this_.InvalidateWeakPtrs();
+ DCHECK(message_loop_->BelongsToCurrentThread());
- // No AudioOutputProxy objects should hold a reference to us when we get
- // to this stage.
- DCHECK(HasOneRef()) << "Only the AudioManager should hold a reference";
-
- AudioOutputStreamList::iterator it = idle_streams_.begin();
- for (; it != idle_streams_.end(); ++it)
- (*it)->Close();
- idle_streams_.clear();
-
- it = pausing_streams_.begin();
- for (; it != pausing_streams_.end(); ++it)
- (*it)->Close();
- pausing_streams_.clear();
+ // Close all idle streams immediately. The |close_timer_| will handle
+ // invalidating any outstanding tasks upon its destruction.
+ CloseAllIdleStreams();
}
bool AudioOutputDispatcherImpl::CreateAndOpenStream() {
- DCHECK_EQ(base::MessageLoop::current(), message_loop_);
+ DCHECK(message_loop_->BelongsToCurrentThread());
AudioOutputStream* stream = audio_manager_->MakeAudioOutputStream(
params_, output_device_id_, input_device_id_);
if (!stream)
@@ -178,29 +139,48 @@ bool AudioOutputDispatcherImpl::CreateAndOpenStream() {
stream->Close();
return false;
}
+
+ const int stream_id = audio_stream_id_++;
+ audio_stream_ids_[stream] = stream_id;
+ audio_log_->OnCreated(
+ stream_id, params_, input_device_id_, output_device_id_);
+
idle_streams_.push_back(stream);
return true;
}
-void AudioOutputDispatcherImpl::OpenTask() {
- DCHECK_EQ(base::MessageLoop::current(), message_loop_);
- // Make sure that we have at least one stream allocated if there
- // are paused streams.
- if (paused_proxies_ > 0 && idle_streams_.empty() &&
- pausing_streams_.empty()) {
- CreateAndOpenStream();
+void AudioOutputDispatcherImpl::CloseAllIdleStreams() {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+ CloseIdleStreams(0);
+}
+
+void AudioOutputDispatcherImpl::CloseIdleStreams(size_t keep_alive) {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+ if (idle_streams_.size() <= keep_alive)
+ return;
+ for (size_t i = keep_alive; i < idle_streams_.size(); ++i) {
+ AudioOutputStream* stream = idle_streams_[i];
+ stream->Close();
+
+ AudioStreamIDMap::iterator it = audio_stream_ids_.find(stream);
+ DCHECK(it != audio_stream_ids_.end());
+ audio_log_->OnClosed(it->second);
+ audio_stream_ids_.erase(it);
}
+ idle_streams_.erase(idle_streams_.begin() + keep_alive, idle_streams_.end());
+}
- close_timer_.Reset();
+void AudioOutputDispatcherImpl::CloseStreamsForWedgeFix() {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+ CloseAllIdleStreams();
}
-// This method is called by |close_timer_|.
-void AudioOutputDispatcherImpl::ClosePendingStreams() {
- DCHECK_EQ(base::MessageLoop::current(), message_loop_);
- while (!idle_streams_.empty()) {
- idle_streams_.back()->Close();
- idle_streams_.pop_back();
- }
+void AudioOutputDispatcherImpl::RestartStreamsForWedgeFix() {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+
+ // Should only be called when the dispatcher is used with fake streams which
+ // don't need to be shutdown or restarted.
+ CHECK_EQ(params_.format(), AudioParameters::AUDIO_FAKE);
}
} // namespace media
diff --git a/chromium/media/audio/audio_output_dispatcher_impl.h b/chromium/media/audio/audio_output_dispatcher_impl.h
index b59f835f9b0..037e11466f1 100644
--- a/chromium/media/audio/audio_output_dispatcher_impl.h
+++ b/chromium/media/audio/audio_output_dispatcher_impl.h
@@ -13,14 +13,14 @@
#ifndef MEDIA_AUDIO_AUDIO_OUTPUT_DISPATCHER_IMPL_H_
#define MEDIA_AUDIO_AUDIO_OUTPUT_DISPATCHER_IMPL_H_
-#include <list>
#include <map>
+#include <vector>
#include "base/basictypes.h"
#include "base/memory/ref_counted.h"
-#include "base/memory/weak_ptr.h"
#include "base/timer/timer.h"
#include "media/audio/audio_io.h"
+#include "media/audio/audio_logging.h"
#include "media/audio/audio_manager.h"
#include "media/audio/audio_output_dispatcher.h"
#include "media/audio/audio_parameters.h"
@@ -31,8 +31,8 @@ class AudioOutputProxy;
class MEDIA_EXPORT AudioOutputDispatcherImpl : public AudioOutputDispatcher {
public:
- // |close_delay_ms| specifies delay after the stream is paused until
- // the audio device is closed.
+ // |close_delay| specifies delay after the stream is idle until the audio
+ // device is closed.
AudioOutputDispatcherImpl(AudioManager* audio_manager,
const AudioParameters& params,
const std::string& output_device_id,
@@ -48,52 +48,53 @@ class MEDIA_EXPORT AudioOutputDispatcherImpl : public AudioOutputDispatcher {
virtual bool StartStream(AudioOutputStream::AudioSourceCallback* callback,
AudioOutputProxy* stream_proxy) OVERRIDE;
- // Holds the physical stream temporarily in |pausing_streams_| and then
- // |stream| is added to the pool of pending streams (i.e. |idle_streams_|).
+ // Stops the stream assigned to the specified proxy and moves it into
+ // |idle_streams_| for reuse by other proxies.
virtual void StopStream(AudioOutputProxy* stream_proxy) OVERRIDE;
virtual void StreamVolumeSet(AudioOutputProxy* stream_proxy,
double volume) OVERRIDE;
+ // Closes |idle_streams_| until the number of |idle_streams_| is equal to the
+ // |idle_proxies_| count. If there are no |idle_proxies_| a single stream is
+ // kept alive until |close_timer_| fires.
virtual void CloseStream(AudioOutputProxy* stream_proxy) OVERRIDE;
virtual void Shutdown() OVERRIDE;
+ virtual void CloseStreamsForWedgeFix() OVERRIDE;
+ virtual void RestartStreamsForWedgeFix() OVERRIDE;
+
private:
- typedef std::map<AudioOutputProxy*, AudioOutputStream*> AudioStreamMap;
friend class base::RefCountedThreadSafe<AudioOutputDispatcherImpl>;
virtual ~AudioOutputDispatcherImpl();
- friend class AudioOutputProxyTest;
-
// Creates a new physical output stream, opens it and pushes to
// |idle_streams_|. Returns false if the stream couldn't be created or
// opened.
bool CreateAndOpenStream();
- // A task scheduled by StartStream(). Opens a new stream and puts
- // it in |idle_streams_|.
- void OpenTask();
-
- // Before a stream is reused, it should sit idle for a bit. This task is
- // called once that time has elapsed.
- void StopStreamTask();
+ // Closes all |idle_streams_|.
+ void CloseAllIdleStreams();
+ // Similar to CloseAllIdleStreams(), but keeps |keep_alive| streams alive.
+ void CloseIdleStreams(size_t keep_alive);
- // Called by |close_timer_|. Closes all pending streams.
- void ClosePendingStreams();
+ size_t idle_proxies_;
+ std::vector<AudioOutputStream*> idle_streams_;
- base::TimeDelta pause_delay_;
- size_t paused_proxies_;
- typedef std::list<AudioOutputStream*> AudioOutputStreamList;
- AudioOutputStreamList idle_streams_;
- AudioOutputStreamList pausing_streams_;
-
- // Used to post delayed tasks to ourselves that we cancel inside Shutdown().
- base::WeakPtrFactory<AudioOutputDispatcherImpl> weak_this_;
+ // When streams are stopped they're added to |idle_streams_|, if no stream is
+ // reused before |close_delay_| elapses |close_timer_| will run
+ // CloseIdleStreams().
base::DelayTimer<AudioOutputDispatcherImpl> close_timer_;
+ typedef std::map<AudioOutputProxy*, AudioOutputStream*> AudioStreamMap;
AudioStreamMap proxy_to_physical_map_;
+ scoped_ptr<AudioLog> audio_log_;
+ typedef std::map<AudioOutputStream*, int> AudioStreamIDMap;
+ AudioStreamIDMap audio_stream_ids_;
+ int audio_stream_id_;
+
DISALLOW_COPY_AND_ASSIGN(AudioOutputDispatcherImpl);
};
diff --git a/chromium/media/audio/audio_output_proxy_unittest.cc b/chromium/media/audio/audio_output_proxy_unittest.cc
index 1806ce66131..cea098820aa 100644
--- a/chromium/media/audio/audio_output_proxy_unittest.cc
+++ b/chromium/media/audio/audio_output_proxy_unittest.cc
@@ -6,11 +6,13 @@
#include "base/message_loop/message_loop.h"
#include "base/message_loop/message_loop_proxy.h"
+#include "base/run_loop.h"
#include "media/audio/audio_manager.h"
#include "media/audio/audio_manager_base.h"
#include "media/audio/audio_output_dispatcher_impl.h"
#include "media/audio/audio_output_proxy.h"
#include "media/audio/audio_output_resampler.h"
+#include "media/audio/fake_audio_log_factory.h"
#include "media/audio/fake_audio_output_stream.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -36,10 +38,7 @@ using media::FakeAudioOutputStream;
namespace {
-static const int kTestCloseDelayMs = 100;
-
-// Used in the test where we don't want a stream to be closed unexpectedly.
-static const int kTestBigCloseDelaySeconds = 1000;
+static const int kTestCloseDelayMs = 10;
// Delay between callbacks to AudioSourceCallback::OnMoreData.
static const int kOnMoreDataCallbackDelayMs = 10;
@@ -87,14 +86,14 @@ class MockAudioOutputStream : public AudioOutputStream {
class MockAudioManager : public AudioManagerBase {
public:
- MockAudioManager() {}
+ MockAudioManager() : AudioManagerBase(&fake_audio_log_factory_) {}
virtual ~MockAudioManager() {
Shutdown();
}
MOCK_METHOD0(HasAudioOutputDevices, bool());
MOCK_METHOD0(HasAudioInputDevices, bool());
- MOCK_METHOD0(GetAudioInputDeviceModel, string16());
+ MOCK_METHOD0(GetAudioInputDeviceModel, base::string16());
MOCK_METHOD3(MakeAudioOutputStream, AudioOutputStream*(
const AudioParameters& params,
const std::string& device_id,
@@ -107,6 +106,7 @@ class MockAudioManager : public AudioManagerBase {
const AudioParameters& params, const std::string& device_id));
MOCK_METHOD0(ShowAudioInputSettings, void());
MOCK_METHOD0(GetMessageLoop, scoped_refptr<base::MessageLoopProxy>());
+ MOCK_METHOD0(GetWorkerLoop, scoped_refptr<base::MessageLoopProxy>());
MOCK_METHOD1(GetAudioInputDeviceNames, void(
media::AudioDeviceNames* device_name));
@@ -121,6 +121,9 @@ class MockAudioManager : public AudioManagerBase {
const AudioParameters& params, const std::string& device_id));
MOCK_METHOD2(GetPreferredOutputStreamParameters, AudioParameters(
const std::string& device_id, const AudioParameters& params));
+
+ private:
+ media::FakeAudioLogFactory fake_audio_log_factory_;
};
class MockAudioSourceCallback : public AudioOutputStream::AudioSourceCallback {
@@ -145,33 +148,28 @@ class AudioOutputProxyTest : public testing::Test {
virtual void SetUp() {
EXPECT_CALL(manager_, GetMessageLoop())
.WillRepeatedly(Return(message_loop_.message_loop_proxy()));
+ EXPECT_CALL(manager_, GetWorkerLoop())
+ .WillRepeatedly(Return(message_loop_.message_loop_proxy()));
+ // Use a low sample rate and large buffer size when testing otherwise the
+ // FakeAudioOutputStream will keep the message loop busy indefinitely; i.e.,
+ // RunUntilIdle() will never terminate.
+ params_ = AudioParameters(AudioParameters::AUDIO_PCM_LINEAR,
+ CHANNEL_LAYOUT_STEREO, 8000, 16, 2048);
InitDispatcher(base::TimeDelta::FromMilliseconds(kTestCloseDelayMs));
}
virtual void TearDown() {
- // All paused proxies should have been closed at this point.
- EXPECT_EQ(0u, dispatcher_impl_->paused_proxies_);
-
// This is necessary to free all proxy objects that have been
// closed by the test.
message_loop_.RunUntilIdle();
}
virtual void InitDispatcher(base::TimeDelta close_delay) {
- // Use a low sample rate and large buffer size when testing otherwise the
- // FakeAudioOutputStream will keep the message loop busy indefinitely; i.e.,
- // RunUntilIdle() will never terminate.
- params_ = AudioParameters(AudioParameters::AUDIO_PCM_LINEAR,
- CHANNEL_LAYOUT_STEREO, 8000, 16, 2048);
dispatcher_impl_ = new AudioOutputDispatcherImpl(&manager(),
params_,
std::string(),
std::string(),
close_delay);
-
- // Necessary to know how long the dispatcher will wait before posting
- // StopStreamTask.
- pause_delay_ = dispatcher_impl_->pause_delay_;
}
virtual void OnStart() {}
@@ -180,15 +178,24 @@ class AudioOutputProxyTest : public testing::Test {
return manager_;
}
- // Wait for the close timer to fire.
- void WaitForCloseTimer(const int timer_delay_ms) {
- message_loop_.RunUntilIdle(); // OpenTask() may reset the timer.
- base::PlatformThread::Sleep(
- base::TimeDelta::FromMilliseconds(timer_delay_ms) * 2);
- message_loop_.RunUntilIdle();
+ void WaitForCloseTimer(MockAudioOutputStream* stream) {
+ base::RunLoop run_loop;
+ EXPECT_CALL(*stream, Close())
+ .WillOnce(testing::InvokeWithoutArgs(&run_loop, &base::RunLoop::Quit));
+ run_loop.Run();
}
- // Methods that do actual tests.
+ void CloseAndWaitForCloseTimer(AudioOutputProxy* proxy,
+ MockAudioOutputStream* stream) {
+ // Close the stream and verify it doesn't happen immediately.
+ proxy->Close();
+ Mock::VerifyAndClear(stream);
+
+ // Wait for the actual close event to come from the close timer.
+ WaitForCloseTimer(stream);
+ }
+
+ // Basic Open() and Close() test.
void OpenAndClose(AudioOutputDispatcher* dispatcher) {
MockAudioOutputStream stream(&manager_, params_);
@@ -196,16 +203,13 @@ class AudioOutputProxyTest : public testing::Test {
.WillOnce(Return(&stream));
EXPECT_CALL(stream, Open())
.WillOnce(Return(true));
- EXPECT_CALL(stream, Close())
- .Times(1);
AudioOutputProxy* proxy = new AudioOutputProxy(dispatcher);
EXPECT_TRUE(proxy->Open());
- proxy->Close();
- WaitForCloseTimer(kTestCloseDelayMs);
+ CloseAndWaitForCloseTimer(proxy, &stream);
}
- // Create a stream, and then calls Start() and Stop().
+ // Creates a stream, and then calls Start() and Stop().
void StartAndStop(AudioOutputDispatcher* dispatcher) {
MockAudioOutputStream stream(&manager_, params_);
@@ -215,8 +219,6 @@ class AudioOutputProxyTest : public testing::Test {
.WillOnce(Return(true));
EXPECT_CALL(stream, SetVolume(_))
.Times(1);
- EXPECT_CALL(stream, Close())
- .Times(1);
AudioOutputProxy* proxy = new AudioOutputProxy(dispatcher);
EXPECT_TRUE(proxy->Open());
@@ -225,13 +227,12 @@ class AudioOutputProxyTest : public testing::Test {
OnStart();
proxy->Stop();
- proxy->Close();
- WaitForCloseTimer(kTestCloseDelayMs);
+ CloseAndWaitForCloseTimer(proxy, &stream);
EXPECT_TRUE(stream.stop_called());
EXPECT_TRUE(stream.start_called());
}
- // Verify that the stream is closed after Stop is called.
+ // Verify that the stream is closed after Stop() is called.
void CloseAfterStop(AudioOutputDispatcher* dispatcher) {
MockAudioOutputStream stream(&manager_, params_);
@@ -241,8 +242,6 @@ class AudioOutputProxyTest : public testing::Test {
.WillOnce(Return(true));
EXPECT_CALL(stream, SetVolume(_))
.Times(1);
- EXPECT_CALL(stream, Close())
- .Times(1);
AudioOutputProxy* proxy = new AudioOutputProxy(dispatcher);
EXPECT_TRUE(proxy->Open());
@@ -251,19 +250,14 @@ class AudioOutputProxyTest : public testing::Test {
OnStart();
proxy->Stop();
- // Wait for StopStream() to post StopStreamTask().
- base::PlatformThread::Sleep(pause_delay_ * 2);
- WaitForCloseTimer(kTestCloseDelayMs);
-
- // Verify expectation before calling Close().
- Mock::VerifyAndClear(&stream);
-
+ // Wait for the close timer to fire after StopStream().
+ WaitForCloseTimer(&stream);
proxy->Close();
EXPECT_TRUE(stream.stop_called());
EXPECT_TRUE(stream.start_called());
}
- // Create two streams, but don't start them. Only one device must be open.
+ // Create two streams, but don't start them. Only one device must be opened.
void TwoStreams(AudioOutputDispatcher* dispatcher) {
MockAudioOutputStream stream(&manager_, params_);
@@ -271,16 +265,13 @@ class AudioOutputProxyTest : public testing::Test {
.WillOnce(Return(&stream));
EXPECT_CALL(stream, Open())
.WillOnce(Return(true));
- EXPECT_CALL(stream, Close())
- .Times(1);
AudioOutputProxy* proxy1 = new AudioOutputProxy(dispatcher);
AudioOutputProxy* proxy2 = new AudioOutputProxy(dispatcher);
EXPECT_TRUE(proxy1->Open());
EXPECT_TRUE(proxy2->Open());
proxy1->Close();
- proxy2->Close();
- WaitForCloseTimer(kTestCloseDelayMs);
+ CloseAndWaitForCloseTimer(proxy2, &stream);
EXPECT_FALSE(stream.stop_called());
EXPECT_FALSE(stream.start_called());
}
@@ -299,7 +290,6 @@ class AudioOutputProxyTest : public testing::Test {
AudioOutputProxy* proxy = new AudioOutputProxy(dispatcher);
EXPECT_FALSE(proxy->Open());
proxy->Close();
- WaitForCloseTimer(kTestCloseDelayMs);
EXPECT_FALSE(stream.stop_called());
EXPECT_FALSE(stream.start_called());
}
@@ -311,61 +301,45 @@ class AudioOutputProxyTest : public testing::Test {
.WillOnce(Return(&stream));
EXPECT_CALL(stream, Open())
.WillOnce(Return(true));
- EXPECT_CALL(stream, Close())
- .Times(1);
AudioOutputProxy* proxy = new AudioOutputProxy(dispatcher);
EXPECT_TRUE(proxy->Open());
- // Simulate a delay.
- base::PlatformThread::Sleep(
- base::TimeDelta::FromMilliseconds(kTestCloseDelayMs) * 2);
- message_loop_.RunUntilIdle();
-
- // Verify expectation before calling Close().
- Mock::VerifyAndClear(&stream);
-
+ WaitForCloseTimer(&stream);
proxy->Close();
EXPECT_FALSE(stream.stop_called());
EXPECT_FALSE(stream.start_called());
}
- void TwoStreams_OnePlaying(AudioOutputDispatcher* dispatcher) {
- MockAudioOutputStream stream1(&manager_, params_);
- MockAudioOutputStream stream2(&manager_, params_);
+ void OneStream_TwoPlays(AudioOutputDispatcher* dispatcher) {
+ MockAudioOutputStream stream(&manager_, params_);
EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
- .WillOnce(Return(&stream1))
- .WillOnce(Return(&stream2));
-
- EXPECT_CALL(stream1, Open())
- .WillOnce(Return(true));
- EXPECT_CALL(stream1, SetVolume(_))
- .Times(1);
- EXPECT_CALL(stream1, Close())
- .Times(1);
+ .WillOnce(Return(&stream));
- EXPECT_CALL(stream2, Open())
+ EXPECT_CALL(stream, Open())
.WillOnce(Return(true));
- EXPECT_CALL(stream2, Close())
- .Times(1);
+ EXPECT_CALL(stream, SetVolume(_))
+ .Times(2);
AudioOutputProxy* proxy1 = new AudioOutputProxy(dispatcher);
- AudioOutputProxy* proxy2 = new AudioOutputProxy(dispatcher);
EXPECT_TRUE(proxy1->Open());
- EXPECT_TRUE(proxy2->Open());
proxy1->Start(&callback_);
- message_loop_.RunUntilIdle();
OnStart();
proxy1->Stop();
+ // The stream should now be idle and get reused by |proxy2|.
+ AudioOutputProxy* proxy2 = new AudioOutputProxy(dispatcher);
+ EXPECT_TRUE(proxy2->Open());
+ proxy2->Start(&callback_);
+ OnStart();
+ proxy2->Stop();
+
proxy1->Close();
- proxy2->Close();
- EXPECT_TRUE(stream1.stop_called());
- EXPECT_TRUE(stream1.start_called());
- EXPECT_FALSE(stream2.stop_called());
- EXPECT_FALSE(stream2.start_called());
+ CloseAndWaitForCloseTimer(proxy2, &stream);
+ EXPECT_TRUE(stream.stop_called());
+ EXPECT_TRUE(stream.start_called());
}
void TwoStreams_BothPlaying(AudioOutputDispatcher* dispatcher) {
@@ -380,15 +354,11 @@ class AudioOutputProxyTest : public testing::Test {
.WillOnce(Return(true));
EXPECT_CALL(stream1, SetVolume(_))
.Times(1);
- EXPECT_CALL(stream1, Close())
- .Times(1);
EXPECT_CALL(stream2, Open())
.WillOnce(Return(true));
EXPECT_CALL(stream2, SetVolume(_))
.Times(1);
- EXPECT_CALL(stream2, Close())
- .Times(1);
AudioOutputProxy* proxy1 = new AudioOutputProxy(dispatcher);
AudioOutputProxy* proxy2 = new AudioOutputProxy(dispatcher);
@@ -399,10 +369,11 @@ class AudioOutputProxyTest : public testing::Test {
proxy2->Start(&callback_);
OnStart();
proxy1->Stop();
+ CloseAndWaitForCloseTimer(proxy1, &stream1);
+
proxy2->Stop();
+ CloseAndWaitForCloseTimer(proxy2, &stream2);
- proxy1->Close();
- proxy2->Close();
EXPECT_TRUE(stream1.stop_called());
EXPECT_TRUE(stream1.start_called());
EXPECT_TRUE(stream2.stop_called());
@@ -416,19 +387,11 @@ class AudioOutputProxyTest : public testing::Test {
.WillOnce(Return(&stream));
EXPECT_CALL(stream, Open())
.WillOnce(Return(true));
- EXPECT_CALL(stream, Close())
- .Times(1);
AudioOutputProxy* proxy = new AudioOutputProxy(dispatcher);
EXPECT_TRUE(proxy->Open());
- // Simulate a delay.
- base::PlatformThread::Sleep(
- base::TimeDelta::FromMilliseconds(kTestCloseDelayMs) * 2);
- message_loop_.RunUntilIdle();
-
- // Verify expectation before calling Close().
- Mock::VerifyAndClear(&stream);
+ WaitForCloseTimer(&stream);
// |stream| is closed at this point. Start() should reopen it again.
EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
@@ -452,7 +415,6 @@ class AudioOutputProxyTest : public testing::Test {
base::MessageLoop message_loop_;
scoped_refptr<AudioOutputDispatcherImpl> dispatcher_impl_;
- base::TimeDelta pause_delay_;
MockAudioManager manager_;
MockAudioSourceCallback callback_;
AudioParameters params_;
@@ -465,7 +427,6 @@ class AudioOutputResamplerTest : public AudioOutputProxyTest {
}
virtual void InitDispatcher(base::TimeDelta close_delay) OVERRIDE {
- AudioOutputProxyTest::InitDispatcher(close_delay);
// Use a low sample rate and large buffer size when testing otherwise the
// FakeAudioOutputStream will keep the message loop busy indefinitely; i.e.,
// RunUntilIdle() will never terminate.
@@ -478,10 +439,13 @@ class AudioOutputResamplerTest : public AudioOutputProxyTest {
}
virtual void OnStart() OVERRIDE {
- // Let start run for a bit.
- message_loop_.RunUntilIdle();
- base::PlatformThread::Sleep(
+ // Let Start() run for a bit.
+ base::RunLoop run_loop;
+ message_loop_.PostDelayedTask(
+ FROM_HERE,
+ run_loop.QuitClosure(),
base::TimeDelta::FromMilliseconds(kStartRunTimeMs));
+ run_loop.Run();
}
protected:
@@ -490,86 +454,82 @@ class AudioOutputResamplerTest : public AudioOutputProxyTest {
};
TEST_F(AudioOutputProxyTest, CreateAndClose) {
- AudioOutputProxy* proxy = new AudioOutputProxy(dispatcher_impl_.get());
+ AudioOutputProxy* proxy = new AudioOutputProxy(dispatcher_impl_);
proxy->Close();
}
TEST_F(AudioOutputResamplerTest, CreateAndClose) {
- AudioOutputProxy* proxy = new AudioOutputProxy(resampler_.get());
+ AudioOutputProxy* proxy = new AudioOutputProxy(resampler_);
proxy->Close();
}
TEST_F(AudioOutputProxyTest, OpenAndClose) {
- OpenAndClose(dispatcher_impl_.get());
+ OpenAndClose(dispatcher_impl_);
}
TEST_F(AudioOutputResamplerTest, OpenAndClose) {
- OpenAndClose(resampler_.get());
+ OpenAndClose(resampler_);
}
// Create a stream, and verify that it is closed after kTestCloseDelayMs.
// if it doesn't start playing.
TEST_F(AudioOutputProxyTest, CreateAndWait) {
- CreateAndWait(dispatcher_impl_.get());
+ CreateAndWait(dispatcher_impl_);
}
// Create a stream, and verify that it is closed after kTestCloseDelayMs.
// if it doesn't start playing.
TEST_F(AudioOutputResamplerTest, CreateAndWait) {
- CreateAndWait(resampler_.get());
+ CreateAndWait(resampler_);
}
TEST_F(AudioOutputProxyTest, StartAndStop) {
- StartAndStop(dispatcher_impl_.get());
+ StartAndStop(dispatcher_impl_);
}
TEST_F(AudioOutputResamplerTest, StartAndStop) {
- StartAndStop(resampler_.get());
+ StartAndStop(resampler_);
}
TEST_F(AudioOutputProxyTest, CloseAfterStop) {
- CloseAfterStop(dispatcher_impl_.get());
+ CloseAfterStop(dispatcher_impl_);
}
TEST_F(AudioOutputResamplerTest, CloseAfterStop) {
- CloseAfterStop(resampler_.get());
+ CloseAfterStop(resampler_);
}
-TEST_F(AudioOutputProxyTest, TwoStreams) { TwoStreams(dispatcher_impl_.get()); }
+TEST_F(AudioOutputProxyTest, TwoStreams) { TwoStreams(dispatcher_impl_); }
-TEST_F(AudioOutputResamplerTest, TwoStreams) { TwoStreams(resampler_.get()); }
+TEST_F(AudioOutputResamplerTest, TwoStreams) { TwoStreams(resampler_); }
// Two streams: verify that second stream is allocated when the first
// starts playing.
-TEST_F(AudioOutputProxyTest, TwoStreams_OnePlaying) {
- InitDispatcher(base::TimeDelta::FromSeconds(kTestBigCloseDelaySeconds));
- TwoStreams_OnePlaying(dispatcher_impl_.get());
+TEST_F(AudioOutputProxyTest, OneStream_TwoPlays) {
+ OneStream_TwoPlays(dispatcher_impl_);
}
-TEST_F(AudioOutputResamplerTest, TwoStreams_OnePlaying) {
- InitDispatcher(base::TimeDelta::FromSeconds(kTestBigCloseDelaySeconds));
- TwoStreams_OnePlaying(resampler_.get());
+TEST_F(AudioOutputResamplerTest, OneStream_TwoPlays) {
+ OneStream_TwoPlays(resampler_);
}
// Two streams, both are playing. Dispatcher should not open a third stream.
TEST_F(AudioOutputProxyTest, TwoStreams_BothPlaying) {
- InitDispatcher(base::TimeDelta::FromSeconds(kTestBigCloseDelaySeconds));
- TwoStreams_BothPlaying(dispatcher_impl_.get());
+ TwoStreams_BothPlaying(dispatcher_impl_);
}
TEST_F(AudioOutputResamplerTest, TwoStreams_BothPlaying) {
- InitDispatcher(base::TimeDelta::FromSeconds(kTestBigCloseDelaySeconds));
- TwoStreams_BothPlaying(resampler_.get());
+ TwoStreams_BothPlaying(resampler_);
}
-TEST_F(AudioOutputProxyTest, OpenFailed) { OpenFailed(dispatcher_impl_.get()); }
+TEST_F(AudioOutputProxyTest, OpenFailed) { OpenFailed(dispatcher_impl_); }
// Start() method failed.
TEST_F(AudioOutputProxyTest, StartFailed) {
- StartFailed(dispatcher_impl_.get());
+ StartFailed(dispatcher_impl_);
}
-TEST_F(AudioOutputResamplerTest, StartFailed) { StartFailed(resampler_.get()); }
+TEST_F(AudioOutputResamplerTest, StartFailed) { StartFailed(resampler_); }
// Simulate AudioOutputStream::Create() failure with a low latency stream and
// ensure AudioOutputResampler falls back to the high latency path.
@@ -581,13 +541,10 @@ TEST_F(AudioOutputResamplerTest, LowLatencyCreateFailedFallback) {
.WillRepeatedly(Return(&stream));
EXPECT_CALL(stream, Open())
.WillOnce(Return(true));
- EXPECT_CALL(stream, Close())
- .Times(1);
- AudioOutputProxy* proxy = new AudioOutputProxy(resampler_.get());
+ AudioOutputProxy* proxy = new AudioOutputProxy(resampler_);
EXPECT_TRUE(proxy->Open());
- proxy->Close();
- WaitForCloseTimer(kTestCloseDelayMs);
+ CloseAndWaitForCloseTimer(proxy, &stream);
}
// Simulate AudioOutputStream::Open() failure with a low latency stream and
@@ -605,13 +562,10 @@ TEST_F(AudioOutputResamplerTest, LowLatencyOpenFailedFallback) {
.Times(1);
EXPECT_CALL(okay_stream, Open())
.WillOnce(Return(true));
- EXPECT_CALL(okay_stream, Close())
- .Times(1);
- AudioOutputProxy* proxy = new AudioOutputProxy(resampler_.get());
+ AudioOutputProxy* proxy = new AudioOutputProxy(resampler_);
EXPECT_TRUE(proxy->Open());
- proxy->Close();
- WaitForCloseTimer(kTestCloseDelayMs);
+ CloseAndWaitForCloseTimer(proxy, &okay_stream);
}
// Simulate failures to open both the low latency and the fallback high latency
@@ -642,13 +596,10 @@ TEST_F(AudioOutputResamplerTest, HighLatencyFallbackFailed) {
.WillOnce(Return(&okay_stream));
EXPECT_CALL(okay_stream, Open())
.WillOnce(Return(true));
- EXPECT_CALL(okay_stream, Close())
- .Times(1);
- AudioOutputProxy* proxy = new AudioOutputProxy(resampler_.get());
+ AudioOutputProxy* proxy = new AudioOutputProxy(resampler_);
EXPECT_TRUE(proxy->Open());
- proxy->Close();
- WaitForCloseTimer(kTestCloseDelayMs);
+ CloseAndWaitForCloseTimer(proxy, &okay_stream);
}
// Simulate failures to open both the low latency, the fallback high latency
@@ -666,10 +617,9 @@ TEST_F(AudioOutputResamplerTest, AllFallbackFailed) {
.Times(kFallbackCount)
.WillRepeatedly(Return(static_cast<AudioOutputStream*>(NULL)));
- AudioOutputProxy* proxy = new AudioOutputProxy(resampler_.get());
+ AudioOutputProxy* proxy = new AudioOutputProxy(resampler_);
EXPECT_FALSE(proxy->Open());
proxy->Close();
- WaitForCloseTimer(kTestCloseDelayMs);
}
// Simulate an eventual OpenStream() failure; i.e. successful OpenStream() calls
@@ -677,72 +627,126 @@ TEST_F(AudioOutputResamplerTest, AllFallbackFailed) {
TEST_F(AudioOutputResamplerTest, LowLatencyOpenEventuallyFails) {
MockAudioOutputStream stream1(&manager_, params_);
MockAudioOutputStream stream2(&manager_, params_);
- MockAudioOutputStream stream3(&manager_, params_);
// Setup the mock such that all three streams are successfully created.
EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
.WillOnce(Return(&stream1))
.WillOnce(Return(&stream2))
- .WillOnce(Return(&stream3))
.WillRepeatedly(Return(static_cast<AudioOutputStream*>(NULL)));
// Stream1 should be able to successfully open and start.
EXPECT_CALL(stream1, Open())
.WillOnce(Return(true));
- EXPECT_CALL(stream1, Close())
- .Times(1);
EXPECT_CALL(stream1, SetVolume(_))
.Times(1);
// Stream2 should also be able to successfully open and start.
EXPECT_CALL(stream2, Open())
.WillOnce(Return(true));
- EXPECT_CALL(stream2, Close())
- .Times(1);
EXPECT_CALL(stream2, SetVolume(_))
.Times(1);
- // Stream3 should fail on Open() (yet still be closed since
- // MakeAudioOutputStream returned a valid AudioOutputStream object).
- EXPECT_CALL(stream3, Open())
- .WillOnce(Return(false));
- EXPECT_CALL(stream3, Close())
- .Times(1);
-
// Open and start the first proxy and stream.
- AudioOutputProxy* proxy1 = new AudioOutputProxy(resampler_.get());
+ AudioOutputProxy* proxy1 = new AudioOutputProxy(resampler_);
EXPECT_TRUE(proxy1->Open());
proxy1->Start(&callback_);
OnStart();
// Open and start the second proxy and stream.
- AudioOutputProxy* proxy2 = new AudioOutputProxy(resampler_.get());
+ AudioOutputProxy* proxy2 = new AudioOutputProxy(resampler_);
EXPECT_TRUE(proxy2->Open());
proxy2->Start(&callback_);
OnStart();
// Attempt to open the third stream which should fail.
- AudioOutputProxy* proxy3 = new AudioOutputProxy(resampler_.get());
+ AudioOutputProxy* proxy3 = new AudioOutputProxy(resampler_);
EXPECT_FALSE(proxy3->Open());
+ proxy3->Close();
// Perform the required Stop()/Close() shutdown dance for each proxy. Under
// the hood each proxy should correctly call CloseStream() if OpenStream()
// succeeded or not.
+ proxy2->Stop();
+ CloseAndWaitForCloseTimer(proxy2, &stream2);
+
+ proxy1->Stop();
+ CloseAndWaitForCloseTimer(proxy1, &stream1);
+
+ EXPECT_TRUE(stream1.stop_called());
+ EXPECT_TRUE(stream1.start_called());
+ EXPECT_TRUE(stream2.stop_called());
+ EXPECT_TRUE(stream2.start_called());
+}
+
+// Ensures the methods used to fix audio output wedges are working correctly.
+TEST_F(AudioOutputResamplerTest, WedgeFix) {
+ MockAudioOutputStream stream1(&manager_, params_);
+ MockAudioOutputStream stream2(&manager_, params_);
+ MockAudioOutputStream stream3(&manager_, params_);
+
+ // Setup the mock such that all three streams are successfully created.
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
+ .WillOnce(Return(&stream1))
+ .WillOnce(Return(&stream2))
+ .WillOnce(Return(&stream3));
+
+ // Stream1 should be able to successfully open and start.
+ EXPECT_CALL(stream1, Open())
+ .WillOnce(Return(true));
+ EXPECT_CALL(stream1, SetVolume(_));
+ EXPECT_CALL(stream2, Open())
+ .WillOnce(Return(true));
+ EXPECT_CALL(stream2, SetVolume(_));
+
+ // Open and start the first proxy and stream.
+ AudioOutputProxy* proxy1 = new AudioOutputProxy(resampler_.get());
+ EXPECT_TRUE(proxy1->Open());
+ proxy1->Start(&callback_);
+ OnStart();
+
+ // Open, but do not start the second proxy.
+ AudioOutputProxy* proxy2 = new AudioOutputProxy(resampler_.get());
+ EXPECT_TRUE(proxy2->Open());
+
+ // Open, start and then stop the third proxy.
+ AudioOutputProxy* proxy3 = new AudioOutputProxy(resampler_.get());
+ EXPECT_TRUE(proxy3->Open());
+ proxy3->Start(&callback_);
+ OnStart();
proxy3->Stop();
+
+ // Wait for stream to timeout and shutdown.
+ WaitForCloseTimer(&stream2);
+
+ EXPECT_CALL(stream1, Close());
+ resampler_->CloseStreamsForWedgeFix();
+
+ // Don't pump the MessageLoop between CloseStreamsForWedgeFix() and
+ // RestartStreamsForWedgeFix() to simulate intended usage. The OnStart() call
+ // will take care of necessary work.
+
+ // Stream3 should take Stream1's place after RestartStreamsForWedgeFix(). No
+ // additional streams should be opened for proxy2 and proxy3.
+ EXPECT_CALL(stream3, Open())
+ .WillOnce(Return(true));
+ EXPECT_CALL(stream3, SetVolume(_));
+
+ resampler_->RestartStreamsForWedgeFix();
+ OnStart();
+
+ // Perform the required Stop()/Close() shutdown dance for each proxy.
proxy3->Close();
- proxy2->Stop();
proxy2->Close();
proxy1->Stop();
- proxy1->Close();
+ CloseAndWaitForCloseTimer(proxy1, &stream3);
// Wait for all of the messages to fly and then verify stream behavior.
- WaitForCloseTimer(kTestCloseDelayMs);
EXPECT_TRUE(stream1.stop_called());
EXPECT_TRUE(stream1.start_called());
EXPECT_TRUE(stream2.stop_called());
EXPECT_TRUE(stream2.start_called());
- EXPECT_FALSE(stream3.stop_called());
- EXPECT_FALSE(stream3.start_called());
+ EXPECT_TRUE(stream3.stop_called());
+ EXPECT_TRUE(stream3.start_called());
}
} // namespace media
diff --git a/chromium/media/audio/audio_output_resampler.cc b/chromium/media/audio/audio_output_resampler.cc
index da424ec1246..c53f3e089ce 100644
--- a/chromium/media/audio/audio_output_resampler.cc
+++ b/chromium/media/audio/audio_output_resampler.cc
@@ -14,7 +14,6 @@
#include "media/audio/audio_io.h"
#include "media/audio/audio_output_dispatcher_impl.h"
#include "media/audio/audio_output_proxy.h"
-#include "media/audio/audio_util.h"
#include "media/audio/sample_rates.h"
#include "media/base/audio_converter.h"
#include "media/base/limits.h"
@@ -44,6 +43,8 @@ class OnMoreDataConverter
// Clears |source_callback_| and flushes the resampler.
void Stop();
+ bool started() { return source_callback_ != NULL; }
+
private:
// AudioConverter::InputCallback implementation.
virtual double ProvideInput(AudioBus* audio_bus,
@@ -51,15 +52,11 @@ class OnMoreDataConverter
// Ratio of input bytes to output bytes used to correct playback delay with
// regard to buffering and resampling.
- double io_ratio_;
+ const double io_ratio_;
- // Source callback and associated lock.
- base::Lock source_lock_;
+ // Source callback.
AudioOutputStream::AudioSourceCallback* source_callback_;
- // |source| passed to OnMoreIOData() which should be passed downstream.
- AudioBus* source_bus_;
-
// Last AudioBuffersState object received via OnMoreData(), used to correct
// playback delay by ProvideInput() and passed on to |source_callback_|.
AudioBuffersState current_buffers_state_;
@@ -121,28 +118,27 @@ static void RecordFallbackStats(const AudioParameters& output_params) {
}
}
+// Converts low latency based |output_params| into high latency appropriate
+// output parameters in error situations.
+void AudioOutputResampler::SetupFallbackParams() {
// Only Windows has a high latency output driver that is not the same as the low
// latency path.
#if defined(OS_WIN)
-// Converts low latency based |output_params| into high latency appropriate
-// output parameters in error situations.
-static AudioParameters SetupFallbackParams(
- const AudioParameters& input_params, const AudioParameters& output_params) {
// Choose AudioParameters appropriate for opening the device in high latency
// mode. |kMinLowLatencyFrameSize| is arbitrarily based on Pepper Flash's
// MAXIMUM frame size for low latency.
static const int kMinLowLatencyFrameSize = 2048;
- int frames_per_buffer = std::min(
- std::max(input_params.frames_per_buffer(), kMinLowLatencyFrameSize),
- static_cast<int>(
- GetHighLatencyOutputBufferSize(input_params.sample_rate())));
-
- return AudioParameters(
- AudioParameters::AUDIO_PCM_LINEAR, input_params.channel_layout(),
- input_params.sample_rate(), input_params.bits_per_sample(),
+ const int frames_per_buffer =
+ std::max(params_.frames_per_buffer(), kMinLowLatencyFrameSize);
+
+ output_params_ = AudioParameters(
+ AudioParameters::AUDIO_PCM_LINEAR, params_.channel_layout(),
+ params_.sample_rate(), params_.bits_per_sample(),
frames_per_buffer);
-}
+ output_device_id_ = "";
+ Initialize();
#endif
+}
AudioOutputResampler::AudioOutputResampler(AudioManager* audio_manager,
const AudioParameters& input_params,
@@ -178,7 +174,7 @@ void AudioOutputResampler::Initialize() {
}
bool AudioOutputResampler::OpenStream() {
- DCHECK_EQ(base::MessageLoop::current(), message_loop_);
+ DCHECK(message_loop_->BelongsToCurrentThread());
if (dispatcher_->OpenStream()) {
// Only record the UMA statistic if we didn't fallback during construction
@@ -210,8 +206,7 @@ bool AudioOutputResampler::OpenStream() {
DLOG(ERROR) << "Unable to open audio device in low latency mode. Falling "
<< "back to high latency audio output.";
- output_params_ = SetupFallbackParams(params_, output_params_);
- Initialize();
+ SetupFallbackParams();
if (dispatcher_->OpenStream()) {
streams_opened_ = true;
return true;
@@ -238,7 +233,7 @@ bool AudioOutputResampler::OpenStream() {
bool AudioOutputResampler::StartStream(
AudioOutputStream::AudioSourceCallback* callback,
AudioOutputProxy* stream_proxy) {
- DCHECK_EQ(base::MessageLoop::current(), message_loop_);
+ DCHECK(message_loop_->BelongsToCurrentThread());
OnMoreDataConverter* resampler_callback = NULL;
CallbackMap::iterator it = callbacks_.find(stream_proxy);
@@ -258,12 +253,12 @@ bool AudioOutputResampler::StartStream(
void AudioOutputResampler::StreamVolumeSet(AudioOutputProxy* stream_proxy,
double volume) {
- DCHECK_EQ(base::MessageLoop::current(), message_loop_);
+ DCHECK(message_loop_->BelongsToCurrentThread());
dispatcher_->StreamVolumeSet(stream_proxy, volume);
}
void AudioOutputResampler::StopStream(AudioOutputProxy* stream_proxy) {
- DCHECK_EQ(base::MessageLoop::current(), message_loop_);
+ DCHECK(message_loop_->BelongsToCurrentThread());
dispatcher_->StopStream(stream_proxy);
// Now that StopStream() has completed the underlying physical stream should
@@ -275,7 +270,7 @@ void AudioOutputResampler::StopStream(AudioOutputProxy* stream_proxy) {
}
void AudioOutputResampler::CloseStream(AudioOutputProxy* stream_proxy) {
- DCHECK_EQ(base::MessageLoop::current(), message_loop_);
+ DCHECK(message_loop_->BelongsToCurrentThread());
dispatcher_->CloseStream(stream_proxy);
// We assume that StopStream() is always called prior to CloseStream(), so
@@ -288,7 +283,7 @@ void AudioOutputResampler::CloseStream(AudioOutputProxy* stream_proxy) {
}
void AudioOutputResampler::Shutdown() {
- DCHECK_EQ(base::MessageLoop::current(), message_loop_);
+ DCHECK(message_loop_->BelongsToCurrentThread());
// No AudioOutputProxy objects should hold a reference to us when we get
// to this stage.
@@ -298,16 +293,44 @@ void AudioOutputResampler::Shutdown() {
DCHECK(callbacks_.empty());
}
+void AudioOutputResampler::CloseStreamsForWedgeFix() {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+
+ // Stop and close all active streams. Once all streams across all dispatchers
+ // have been closed the AudioManager will call RestartStreamsForWedgeFix().
+ for (CallbackMap::iterator it = callbacks_.begin(); it != callbacks_.end();
+ ++it) {
+ if (it->second->started())
+ dispatcher_->StopStream(it->first);
+ dispatcher_->CloseStream(it->first);
+ }
+
+ // Close all idle streams as well.
+ dispatcher_->CloseStreamsForWedgeFix();
+}
+
+void AudioOutputResampler::RestartStreamsForWedgeFix() {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+ // By opening all streams first and then starting them one by one we ensure
+ // the dispatcher only opens streams for those which will actually be used.
+ for (CallbackMap::iterator it = callbacks_.begin(); it != callbacks_.end();
+ ++it) {
+ dispatcher_->OpenStream();
+ }
+ for (CallbackMap::iterator it = callbacks_.begin(); it != callbacks_.end();
+ ++it) {
+ if (it->second->started())
+ dispatcher_->StartStream(it->second, it->first);
+ }
+}
+
OnMoreDataConverter::OnMoreDataConverter(const AudioParameters& input_params,
const AudioParameters& output_params)
- : source_callback_(NULL),
- source_bus_(NULL),
+ : io_ratio_(static_cast<double>(input_params.GetBytesPerSecond()) /
+ output_params.GetBytesPerSecond()),
+ source_callback_(NULL),
input_bytes_per_second_(input_params.GetBytesPerSecond()),
- audio_converter_(input_params, output_params, false) {
- io_ratio_ =
- static_cast<double>(input_params.GetBytesPerSecond()) /
- output_params.GetBytesPerSecond();
-}
+ audio_converter_(input_params, output_params, false) {}
OnMoreDataConverter::~OnMoreDataConverter() {
// Ensure Stop() has been called so we don't end up with an AudioOutputStream
@@ -317,7 +340,6 @@ OnMoreDataConverter::~OnMoreDataConverter() {
void OnMoreDataConverter::Start(
AudioOutputStream::AudioSourceCallback* callback) {
- base::AutoLock auto_lock(source_lock_);
CHECK(!source_callback_);
source_callback_ = callback;
@@ -328,7 +350,6 @@ void OnMoreDataConverter::Start(
}
void OnMoreDataConverter::Stop() {
- base::AutoLock auto_lock(source_lock_);
CHECK(source_callback_);
source_callback_ = NULL;
audio_converter_.RemoveInput(this);
@@ -342,26 +363,20 @@ int OnMoreDataConverter::OnMoreData(AudioBus* dest,
int OnMoreDataConverter::OnMoreIOData(AudioBus* source,
AudioBus* dest,
AudioBuffersState buffers_state) {
- base::AutoLock auto_lock(source_lock_);
- // While we waited for |source_lock_| the callback might have been cleared.
- if (!source_callback_) {
- dest->Zero();
- return dest->frames();
- }
+ // Note: The input portion of OnMoreIOData() is not supported when a converter
+ // has been injected. Downstream clients prefer silence to potentially split
+ // apart input data.
- source_bus_ = source;
current_buffers_state_ = buffers_state;
audio_converter_.Convert(dest);
- // Always return the full number of frames requested, ProvideInput_Locked()
+ // Always return the full number of frames requested, ProvideInput()
// will pad with silence if it wasn't able to acquire enough data.
return dest->frames();
}
double OnMoreDataConverter::ProvideInput(AudioBus* dest,
base::TimeDelta buffer_delay) {
- source_lock_.AssertAcquired();
-
// Adjust playback delay to include |buffer_delay|.
// TODO(dalecurtis): Stop passing bytes around, it doesn't make sense since
// AudioBus is just float data. Use TimeDelta instead.
@@ -371,27 +386,18 @@ double OnMoreDataConverter::ProvideInput(AudioBus* dest,
buffer_delay.InSecondsF() * input_bytes_per_second_);
// Retrieve data from the original callback.
- int frames = source_callback_->OnMoreIOData(
- source_bus_, dest, new_buffers_state);
-
- // |source_bus_| should only be provided once.
- // TODO(dalecurtis, crogers): This is not a complete fix. If ProvideInput()
- // is called multiple times, we need to do something more clever here.
- source_bus_ = NULL;
+ const int frames = source_callback_->OnMoreIOData(
+ NULL, dest, new_buffers_state);
// Zero any unfilled frames if anything was filled, otherwise we'll just
// return a volume of zero and let AudioConverter drop the output.
if (frames > 0 && frames < dest->frames())
dest->ZeroFramesPartial(frames, dest->frames() - frames);
-
- // TODO(dalecurtis): Return the correct volume here.
return frames > 0 ? 1 : 0;
}
void OnMoreDataConverter::OnError(AudioOutputStream* stream) {
- base::AutoLock auto_lock(source_lock_);
- if (source_callback_)
- source_callback_->OnError(stream);
+ source_callback_->OnError(stream);
}
} // namespace media
diff --git a/chromium/media/audio/audio_output_resampler.h b/chromium/media/audio/audio_output_resampler.h
index f9a75ac38f5..a8fca232470 100644
--- a/chromium/media/audio/audio_output_resampler.h
+++ b/chromium/media/audio/audio_output_resampler.h
@@ -53,11 +53,17 @@ class MEDIA_EXPORT AudioOutputResampler : public AudioOutputDispatcher {
double volume) OVERRIDE;
virtual void CloseStream(AudioOutputProxy* stream_proxy) OVERRIDE;
virtual void Shutdown() OVERRIDE;
+ virtual void CloseStreamsForWedgeFix() OVERRIDE;
+ virtual void RestartStreamsForWedgeFix() OVERRIDE;
private:
friend class base::RefCountedThreadSafe<AudioOutputResampler>;
virtual ~AudioOutputResampler();
+ // Converts low latency based output parameters into high latency
+ // appropriate output parameters in error situations.
+ void SetupFallbackParams();
+
// Used to initialize and reinitialize |dispatcher_|.
void Initialize();
diff --git a/chromium/media/audio/audio_parameters.cc b/chromium/media/audio/audio_parameters.cc
index 5e77c60cb94..fff815610fe 100644
--- a/chromium/media/audio/audio_parameters.cc
+++ b/chromium/media/audio/audio_parameters.cc
@@ -16,7 +16,8 @@ AudioParameters::AudioParameters()
bits_per_sample_(0),
frames_per_buffer_(0),
channels_(0),
- input_channels_(0) {
+ input_channels_(0),
+ effects_(NO_EFFECTS) {
}
AudioParameters::AudioParameters(Format format, ChannelLayout channel_layout,
@@ -28,20 +29,38 @@ AudioParameters::AudioParameters(Format format, ChannelLayout channel_layout,
bits_per_sample_(bits_per_sample),
frames_per_buffer_(frames_per_buffer),
channels_(ChannelLayoutToChannelCount(channel_layout)),
- input_channels_(0) {
+ input_channels_(0),
+ effects_(NO_EFFECTS) {
}
AudioParameters::AudioParameters(Format format, ChannelLayout channel_layout,
int input_channels,
int sample_rate, int bits_per_sample,
- int frames_per_buffer)
+ int frames_per_buffer, int effects)
: format_(format),
channel_layout_(channel_layout),
sample_rate_(sample_rate),
bits_per_sample_(bits_per_sample),
frames_per_buffer_(frames_per_buffer),
channels_(ChannelLayoutToChannelCount(channel_layout)),
- input_channels_(input_channels) {
+ input_channels_(input_channels),
+ effects_(effects) {
+}
+
+AudioParameters::AudioParameters(Format format, ChannelLayout channel_layout,
+ int channels, int input_channels,
+ int sample_rate, int bits_per_sample,
+ int frames_per_buffer, int effects)
+ : format_(format),
+ channel_layout_(channel_layout),
+ sample_rate_(sample_rate),
+ bits_per_sample_(bits_per_sample),
+ frames_per_buffer_(frames_per_buffer),
+ channels_(channels),
+ input_channels_(input_channels),
+ effects_(effects) {
+ if (channel_layout != CHANNEL_LAYOUT_DISCRETE)
+ DCHECK_EQ(channels, ChannelLayoutToChannelCount(channel_layout));
}
void AudioParameters::Reset(Format format, ChannelLayout channel_layout,
@@ -89,9 +108,10 @@ int AudioParameters::GetBytesPerFrame() const {
return channels_ * bits_per_sample_ / 8;
}
-void AudioParameters::SetDiscreteChannels(int channels) {
- channel_layout_ = CHANNEL_LAYOUT_DISCRETE;
- channels_ = channels;
+base::TimeDelta AudioParameters::GetBufferDuration() const {
+ return base::TimeDelta::FromMicroseconds(
+ frames_per_buffer_ * base::Time::kMicrosecondsPerSecond /
+ static_cast<float>(sample_rate_));
}
} // namespace media
diff --git a/chromium/media/audio/audio_parameters.h b/chromium/media/audio/audio_parameters.h
index bc629a7db00..62ff4fd48f1 100644
--- a/chromium/media/audio/audio_parameters.h
+++ b/chromium/media/audio/audio_parameters.h
@@ -6,6 +6,7 @@
#define MEDIA_AUDIO_AUDIO_PARAMETERS_H_
#include "base/basictypes.h"
+#include "base/time/time.h"
#include "media/base/channel_layout.h"
#include "media/base/media_export.h"
@@ -43,6 +44,13 @@ class MEDIA_EXPORT AudioParameters {
kAudioCDSampleRate = 44100,
};
+ // Bitmasks to determine whether certain platform (typically hardware) audio
+ // effects should be enabled.
+ enum PlatformEffectsMask {
+ NO_EFFECTS = 0x0,
+ ECHO_CANCELLER = 0x1
+ };
+
AudioParameters();
AudioParameters(Format format, ChannelLayout channel_layout,
int sample_rate, int bits_per_sample,
@@ -50,7 +58,12 @@ class MEDIA_EXPORT AudioParameters {
AudioParameters(Format format, ChannelLayout channel_layout,
int input_channels,
int sample_rate, int bits_per_sample,
- int frames_per_buffer);
+ int frames_per_buffer, int effects);
+ AudioParameters(Format format, ChannelLayout channel_layout,
+ int channels, int input_channels,
+ int sample_rate, int bits_per_sample,
+ int frames_per_buffer, int effects);
+
void Reset(Format format, ChannelLayout channel_layout,
int channels, int input_channels,
int sample_rate, int bits_per_sample,
@@ -69,6 +82,10 @@ class MEDIA_EXPORT AudioParameters {
// Returns the number of bytes representing a frame of audio.
int GetBytesPerFrame() const;
+ // Returns the duration of this buffer as calculated from frames_per_buffer()
+ // and sample_rate().
+ base::TimeDelta GetBufferDuration() const;
+
Format format() const { return format_; }
ChannelLayout channel_layout() const { return channel_layout_; }
int sample_rate() const { return sample_rate_; }
@@ -76,9 +93,7 @@ class MEDIA_EXPORT AudioParameters {
int frames_per_buffer() const { return frames_per_buffer_; }
int channels() const { return channels_; }
int input_channels() const { return input_channels_; }
-
- // Set to CHANNEL_LAYOUT_DISCRETE with given number of channels.
- void SetDiscreteChannels(int channels);
+ int effects() const { return effects_; }
// Comparison with other AudioParams.
bool operator==(const AudioParameters& other) const {
@@ -88,10 +103,13 @@ class MEDIA_EXPORT AudioParameters {
channels_ == other.channels() &&
input_channels_ == other.input_channels() &&
bits_per_sample_ == other.bits_per_sample() &&
- frames_per_buffer_ == other.frames_per_buffer();
+ frames_per_buffer_ == other.frames_per_buffer() &&
+ effects_ == other.effects();
}
private:
+ // These members are mutable to support entire struct assignment. They should
+ // not be mutated individually.
Format format_; // Format of the stream.
ChannelLayout channel_layout_; // Order of surround sound channels.
int sample_rate_; // Sampling frequency/rate.
@@ -103,6 +121,7 @@ class MEDIA_EXPORT AudioParameters {
int input_channels_; // Optional number of input channels.
// Normally 0, but can be set to specify
// synchronized I/O.
+ int effects_; // Bitmask using PlatformEffectsMask.
};
// Comparison is useful when AudioParameters is used with std structures.
diff --git a/chromium/media/audio/audio_power_monitor.cc b/chromium/media/audio/audio_power_monitor.cc
index d8b9436060e..6536f464b9c 100644
--- a/chromium/media/audio/audio_power_monitor.cc
+++ b/chromium/media/audio/audio_power_monitor.cc
@@ -11,6 +11,7 @@
#include "base/logging.h"
#include "base/time/time.h"
#include "media/base/audio_bus.h"
+#include "media/base/vector_math.h"
namespace media {
@@ -36,30 +37,19 @@ void AudioPowerMonitor::Scan(const AudioBus& buffer, int num_frames) {
return;
// Calculate a new average power by applying a first-order low-pass filter
- // over the audio samples in |buffer|.
- //
- // TODO(miu): Implement optimized SSE/NEON to more efficiently compute the
- // results (in media/base/vector_math) in soon-upcoming change.
+ // (a.k.a. an exponentially-weighted moving average) over the audio samples in
+ // each channel in |buffer|.
float sum_power = 0.0f;
for (int i = 0; i < num_channels; ++i) {
- float average_power_this_channel = average_power_;
- bool clipped = false;
- const float* p = buffer.channel(i);
- const float* const end_of_samples = p + num_frames;
- for (; p < end_of_samples; ++p) {
- const float sample = *p;
- const float sample_squared = sample * sample;
- clipped |= (sample_squared > 1.0f);
- average_power_this_channel +=
- (sample_squared - average_power_this_channel) * sample_weight_;
- }
+ const std::pair<float, float> ewma_and_max = vector_math::EWMAAndMaxPower(
+ average_power_, buffer.channel(i), num_frames, sample_weight_);
// If data in audio buffer is garbage, ignore its effect on the result.
- if (base::IsNaN(average_power_this_channel)) {
- average_power_this_channel = average_power_;
- clipped = false;
+ if (!base::IsFinite(ewma_and_max.first)) {
+ sum_power += average_power_;
+ } else {
+ sum_power += ewma_and_max.first;
+ has_clipped_ |= (ewma_and_max.second > 1.0f);
}
- sum_power += average_power_this_channel;
- has_clipped_ |= clipped;
}
// Update accumulated results, with clamping for sanity.
diff --git a/chromium/media/audio/audio_util.cc b/chromium/media/audio/audio_util.cc
deleted file mode 100644
index 42c6c9109fd..00000000000
--- a/chromium/media/audio/audio_util.cc
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Software adjust volume of samples, allows each audio stream its own
-// volume without impacting master volume for chrome and other applications.
-
-// Implemented as templates to allow 8, 16 and 32 bit implementations.
-// 8 bit is unsigned and biased by 128.
-
-// TODO(vrk): This file has been running pretty wild and free, and it's likely
-// that a lot of the functions can be simplified and made more elegant. Revisit
-// after other audio cleanup is done. (crbug.com/120319)
-
-#include "media/audio/audio_util.h"
-
-#include "base/command_line.h"
-#include "base/strings/string_number_conversions.h"
-#include "base/time/time.h"
-#include "media/base/media_switches.h"
-
-#if defined(OS_WIN)
-#include "base/win/windows_version.h"
-#endif
-
-namespace media {
-
-// Returns user buffer size as specified on the command line or 0 if no buffer
-// size has been specified.
-int GetUserBufferSize() {
- const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
- int buffer_size = 0;
- std::string buffer_size_str(cmd_line->GetSwitchValueASCII(
- switches::kAudioBufferSize));
- if (base::StringToInt(buffer_size_str, &buffer_size) && buffer_size > 0)
- return buffer_size;
-
- return 0;
-}
-
-// Computes a buffer size based on the given |sample_rate|. Must be used in
-// conjunction with AUDIO_PCM_LINEAR.
-size_t GetHighLatencyOutputBufferSize(int sample_rate) {
- int user_buffer_size = GetUserBufferSize();
- if (user_buffer_size)
- return user_buffer_size;
-
- // TODO(vrk/crogers): The buffer sizes that this function computes is probably
- // overly conservative. However, reducing the buffer size to 2048-8192 bytes
- // caused crbug.com/108396. This computation should be revisited while making
- // sure crbug.com/108396 doesn't happen again.
-
- // The minimum number of samples in a hardware packet.
- // This value is selected so that we can handle down to 5khz sample rate.
- static const size_t kMinSamplesPerHardwarePacket = 1024;
-
- // The maximum number of samples in a hardware packet.
- // This value is selected so that we can handle up to 192khz sample rate.
- static const size_t kMaxSamplesPerHardwarePacket = 64 * 1024;
-
- // This constant governs the hardware audio buffer size, this value should be
- // chosen carefully.
- // This value is selected so that we have 8192 samples for 48khz streams.
- static const size_t kMillisecondsPerHardwarePacket = 170;
-
- // Select the number of samples that can provide at least
- // |kMillisecondsPerHardwarePacket| worth of audio data.
- size_t samples = kMinSamplesPerHardwarePacket;
- while (samples <= kMaxSamplesPerHardwarePacket &&
- samples * base::Time::kMillisecondsPerSecond <
- sample_rate * kMillisecondsPerHardwarePacket) {
- samples *= 2;
- }
- return samples;
-}
-
-#if defined(OS_WIN)
-
-int NumberOfWaveOutBuffers() {
- // Use the user provided buffer count if provided.
- int buffers = 0;
- std::string buffers_str(CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
- switches::kWaveOutBuffers));
- if (base::StringToInt(buffers_str, &buffers) && buffers > 0) {
- return buffers;
- }
-
- // Use 4 buffers for Vista, 3 for everyone else:
- // - The entire Windows audio stack was rewritten for Windows Vista and wave
- // out performance was degraded compared to XP.
- // - The regression was fixed in Windows 7 and most configurations will work
- // with 2, but some (e.g., some Sound Blasters) still need 3.
- // - Some XP configurations (even multi-processor ones) also need 3.
- return (base::win::GetVersion() == base::win::VERSION_VISTA) ? 4 : 3;
-}
-
-#endif
-
-} // namespace media
diff --git a/chromium/media/audio/audio_util.h b/chromium/media/audio/audio_util.h
deleted file mode 100644
index a11c327aa47..00000000000
--- a/chromium/media/audio/audio_util.h
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_AUDIO_UTIL_H_
-#define MEDIA_AUDIO_AUDIO_UTIL_H_
-
-#include "base/basictypes.h"
-#include "build/build_config.h"
-#include "media/base/media_export.h"
-
-namespace media {
-
-// Returns user buffer size as specified on the command line or 0 if no buffer
-// size has been specified.
-MEDIA_EXPORT int GetUserBufferSize();
-
-// Computes a buffer size based on the given |sample_rate|. Must be used in
-// conjunction with AUDIO_PCM_LINEAR.
-MEDIA_EXPORT size_t GetHighLatencyOutputBufferSize(int sample_rate);
-
-#if defined(OS_WIN)
-
-// Returns number of buffers to be used by wave out.
-MEDIA_EXPORT int NumberOfWaveOutBuffers();
-
-#endif // defined(OS_WIN)
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_AUDIO_UTIL_H_
diff --git a/chromium/media/audio/cras/audio_manager_cras.cc b/chromium/media/audio/cras/audio_manager_cras.cc
index 14a0c4e86ac..876d6ce8136 100644
--- a/chromium/media/audio/cras/audio_manager_cras.cc
+++ b/chromium/media/audio/cras/audio_manager_cras.cc
@@ -9,7 +9,6 @@
#include "base/logging.h"
#include "base/nix/xdg_util.h"
#include "base/stl_util.h"
-#include "media/audio/audio_util.h"
#include "media/audio/cras/cras_input.h"
#include "media/audio/cras/cras_unified.h"
#include "media/base/channel_layout.h"
@@ -39,7 +38,8 @@ bool AudioManagerCras::HasAudioInputDevices() {
return true;
}
-AudioManagerCras::AudioManagerCras() {
+AudioManagerCras::AudioManagerCras(AudioLogFactory* audio_log_factory)
+ : AudioManagerBase(audio_log_factory) {
SetMaxOutputStreamsAllowed(kMaxOutputStreams);
}
@@ -125,7 +125,7 @@ AudioParameters AudioManagerCras::GetPreferredOutputStreamParameters(
return AudioParameters(
AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout, input_channels,
- sample_rate, bits_per_sample, buffer_size);
+ sample_rate, bits_per_sample, buffer_size, AudioParameters::NO_EFFECTS);
}
AudioOutputStream* AudioManagerCras::MakeOutputStream(
diff --git a/chromium/media/audio/cras/audio_manager_cras.h b/chromium/media/audio/cras/audio_manager_cras.h
index 3b0ef530e07..589374ae0b9 100644
--- a/chromium/media/audio/cras/audio_manager_cras.h
+++ b/chromium/media/audio/cras/audio_manager_cras.h
@@ -15,7 +15,7 @@ namespace media {
class MEDIA_EXPORT AudioManagerCras : public AudioManagerBase {
public:
- AudioManagerCras();
+ AudioManagerCras(AudioLogFactory* audio_log_factory);
// AudioManager implementation.
virtual bool HasAudioOutputDevices() OVERRIDE;
diff --git a/chromium/media/audio/cras/cras_input.cc b/chromium/media/audio/cras/cras_input.cc
index fd574dc86e5..c41f3645efd 100644
--- a/chromium/media/audio/cras/cras_input.cc
+++ b/chromium/media/audio/cras/cras_input.cc
@@ -10,9 +10,9 @@
#include "base/bind.h"
#include "base/logging.h"
#include "base/time/time.h"
+#include "media/audio/alsa/alsa_util.h"
#include "media/audio/audio_manager.h"
#include "media/audio/cras/audio_manager_cras.h"
-#include "media/audio/linux/alsa_util.h"
namespace media {
diff --git a/chromium/media/audio/cras/cras_unified.cc b/chromium/media/audio/cras/cras_unified.cc
index a7741864b31..c85cf59dd5f 100644
--- a/chromium/media/audio/cras/cras_unified.cc
+++ b/chromium/media/audio/cras/cras_unified.cc
@@ -8,9 +8,8 @@
#include "base/command_line.h"
#include "base/logging.h"
-#include "media/audio/audio_util.h"
+#include "media/audio/alsa/alsa_util.h"
#include "media/audio/cras/audio_manager_cras.h"
-#include "media/audio/linux/alsa_util.h"
namespace media {
@@ -162,6 +161,23 @@ void CrasUnifiedStream::Close() {
void CrasUnifiedStream::Start(AudioSourceCallback* callback) {
CHECK(callback);
+
+ // Channel map to CRAS_CHANNEL, values in the same order of
+ // corresponding source in Chromium defined Channels.
+ static const int kChannelMap[] = {
+ CRAS_CH_FL,
+ CRAS_CH_FR,
+ CRAS_CH_FC,
+ CRAS_CH_LFE,
+ CRAS_CH_RL,
+ CRAS_CH_RR,
+ CRAS_CH_FLC,
+ CRAS_CH_FRC,
+ CRAS_CH_RC,
+ CRAS_CH_SL,
+ CRAS_CH_SR
+ };
+
source_callback_ = callback;
// Only start if we can enter the playing state.
@@ -180,6 +196,22 @@ void CrasUnifiedStream::Start(AudioSourceCallback* callback) {
return;
}
+ // Initialize channel layout to all -1 to indicate that none of
+ // the channels is set in the layout.
+ int8 layout[CRAS_CH_MAX] = {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 };
+
+ // Converts to CRAS defined channels. ChannelOrder will return -1
+ // for channels that does not present in params_.channel_layout().
+ for (size_t i = 0; i < arraysize(kChannelMap); ++i)
+ layout[kChannelMap[i]] = ChannelOrder(params_.channel_layout(),
+ static_cast<Channels>(i));
+
+ if (cras_audio_format_set_channel_layout(audio_format, layout)) {
+ LOG(WARNING) << "Error setting channel layout.";
+ callback->OnError(this);
+ return;
+ }
+
cras_stream_params* stream_params = cras_client_unified_params_create(
stream_direction_,
params_.frames_per_buffer(),
diff --git a/chromium/media/audio/fake_audio_log_factory.cc b/chromium/media/audio/fake_audio_log_factory.cc
new file mode 100644
index 00000000000..6f752e559fd
--- /dev/null
+++ b/chromium/media/audio/fake_audio_log_factory.cc
@@ -0,0 +1,32 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/fake_audio_log_factory.h"
+
+namespace media {
+
+class FakeAudioLogImpl : public AudioLog {
+ public:
+ FakeAudioLogImpl() {}
+ virtual ~FakeAudioLogImpl() {}
+ virtual void OnCreated(int component_id,
+ const media::AudioParameters& params,
+ const std::string& input_device_id,
+ const std::string& output_device_id) OVERRIDE {}
+ virtual void OnStarted(int component_id) OVERRIDE {}
+ virtual void OnStopped(int component_id) OVERRIDE {}
+ virtual void OnClosed(int component_id) OVERRIDE {}
+ virtual void OnError(int component_id) OVERRIDE {}
+ virtual void OnSetVolume(int component_id, double volume) OVERRIDE {}
+};
+
+FakeAudioLogFactory::FakeAudioLogFactory() {}
+FakeAudioLogFactory::~FakeAudioLogFactory() {}
+
+scoped_ptr<AudioLog> FakeAudioLogFactory::CreateAudioLog(
+ AudioComponent component) {
+ return scoped_ptr<AudioLog>(new FakeAudioLogImpl());
+}
+
+} // namespace media
diff --git a/chromium/media/audio/fake_audio_log_factory.h b/chromium/media/audio/fake_audio_log_factory.h
new file mode 100644
index 00000000000..30e39e63ff0
--- /dev/null
+++ b/chromium/media/audio/fake_audio_log_factory.h
@@ -0,0 +1,29 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_FAKE_AUDIO_LOG_FACTORY_H_
+#define MEDIA_AUDIO_FAKE_AUDIO_LOG_FACTORY_H_
+
+#include "base/compiler_specific.h"
+#include "media/audio/audio_logging.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+// Creates stub AudioLog instances, for testing, which do nothing.
+class MEDIA_EXPORT FakeAudioLogFactory
+ : NON_EXPORTED_BASE(public AudioLogFactory) {
+ public:
+ FakeAudioLogFactory();
+ virtual ~FakeAudioLogFactory();
+ virtual scoped_ptr<AudioLog> CreateAudioLog(
+ AudioComponent component) OVERRIDE;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(FakeAudioLogFactory);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_FAKE_AUDIO_LOG_FACTORY_H_
diff --git a/chromium/media/audio/fake_audio_manager.cc b/chromium/media/audio/fake_audio_manager.cc
new file mode 100644
index 00000000000..bfe9a0a7ff3
--- /dev/null
+++ b/chromium/media/audio/fake_audio_manager.cc
@@ -0,0 +1,83 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/fake_audio_manager.h"
+
+namespace media {
+
+namespace {
+
+const int kDefaultInputBufferSize = 1024;
+const int kDefaultSampleRate = 48000;
+
+} // namespace
+
+FakeAudioManager::FakeAudioManager(AudioLogFactory* audio_log_factory)
+ : AudioManagerBase(audio_log_factory) {}
+
+FakeAudioManager::~FakeAudioManager() {
+ Shutdown();
+}
+
+// Implementation of AudioManager.
+bool FakeAudioManager::HasAudioOutputDevices() { return false; }
+
+bool FakeAudioManager::HasAudioInputDevices() { return false; }
+
+// Implementation of AudioManagerBase.
+AudioOutputStream* FakeAudioManager::MakeLinearOutputStream(
+ const AudioParameters& params) {
+ return FakeAudioOutputStream::MakeFakeStream(this, params);
+}
+
+AudioOutputStream* FakeAudioManager::MakeLowLatencyOutputStream(
+ const AudioParameters& params,
+ const std::string& device_id,
+ const std::string& input_device_id) {
+ return FakeAudioOutputStream::MakeFakeStream(this, params);
+}
+
+AudioInputStream* FakeAudioManager::MakeLinearInputStream(
+ const AudioParameters& params,
+ const std::string& device_id) {
+ return FakeAudioInputStream::MakeFakeStream(this, params);
+}
+
+AudioInputStream* FakeAudioManager::MakeLowLatencyInputStream(
+ const AudioParameters& params,
+ const std::string& device_id) {
+ return FakeAudioInputStream::MakeFakeStream(this, params);
+}
+
+AudioParameters FakeAudioManager::GetPreferredOutputStreamParameters(
+ const std::string& output_device_id,
+ const AudioParameters& input_params) {
+ static const int kDefaultOutputBufferSize = 2048;
+ static const int kDefaultSampleRate = 48000;
+ ChannelLayout channel_layout = CHANNEL_LAYOUT_STEREO;
+ int sample_rate = kDefaultSampleRate;
+ int buffer_size = kDefaultOutputBufferSize;
+ int bits_per_sample = 16;
+ int input_channels = 0;
+ if (input_params.IsValid()) {
+ sample_rate = input_params.sample_rate();
+ bits_per_sample = input_params.bits_per_sample();
+ channel_layout = input_params.channel_layout();
+ input_channels = input_params.input_channels();
+ buffer_size = std::min(input_params.frames_per_buffer(), buffer_size);
+ }
+
+ return AudioParameters(
+ AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout, input_channels,
+ sample_rate, bits_per_sample, buffer_size, AudioParameters::NO_EFFECTS);
+}
+
+AudioParameters FakeAudioManager::GetInputStreamParameters(
+ const std::string& device_id) {
+ return AudioParameters(
+ AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_STEREO,
+ kDefaultSampleRate, 16, kDefaultInputBufferSize);
+}
+
+} // namespace media
diff --git a/chromium/media/audio/fake_audio_manager.h b/chromium/media/audio/fake_audio_manager.h
new file mode 100644
index 00000000000..b5c45201ed1
--- /dev/null
+++ b/chromium/media/audio/fake_audio_manager.h
@@ -0,0 +1,53 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_FAKE_AUDIO_MANAGER_H_
+#define MEDIA_AUDIO_FAKE_AUDIO_MANAGER_H_
+
+#include <string>
+#include "base/compiler_specific.h"
+#include "media/audio/audio_manager_base.h"
+#include "media/audio/fake_audio_input_stream.h"
+#include "media/audio/fake_audio_output_stream.h"
+
+namespace media {
+
+class MEDIA_EXPORT FakeAudioManager : public AudioManagerBase {
+ public:
+ FakeAudioManager(AudioLogFactory* audio_log_factory);
+
+ // Implementation of AudioManager.
+ virtual bool HasAudioOutputDevices() OVERRIDE;
+ virtual bool HasAudioInputDevices() OVERRIDE;
+
+ // Implementation of AudioManagerBase.
+ virtual AudioOutputStream* MakeLinearOutputStream(
+ const AudioParameters& params) OVERRIDE;
+ virtual AudioOutputStream* MakeLowLatencyOutputStream(
+ const AudioParameters& params,
+ const std::string& device_id,
+ const std::string& input_device_id) OVERRIDE;
+ virtual AudioInputStream* MakeLinearInputStream(const AudioParameters& params,
+ const std::string& device_id)
+ OVERRIDE;
+ virtual AudioInputStream* MakeLowLatencyInputStream(
+ const AudioParameters& params,
+ const std::string& device_id) OVERRIDE;
+ virtual AudioParameters GetInputStreamParameters(
+ const std::string& device_id) OVERRIDE;
+
+ protected:
+ virtual ~FakeAudioManager();
+
+ virtual AudioParameters GetPreferredOutputStreamParameters(
+ const std::string& output_device_id,
+ const AudioParameters& input_params) OVERRIDE;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(FakeAudioManager);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_FAKE_AUDIO_MANAGER_H_
diff --git a/chromium/media/audio/fake_audio_output_stream.cc b/chromium/media/audio/fake_audio_output_stream.cc
index b21a054f13b..fb460ab6805 100644
--- a/chromium/media/audio/fake_audio_output_stream.cc
+++ b/chromium/media/audio/fake_audio_output_stream.cc
@@ -22,7 +22,7 @@ FakeAudioOutputStream::FakeAudioOutputStream(AudioManagerBase* manager,
const AudioParameters& params)
: audio_manager_(manager),
callback_(NULL),
- fake_consumer_(manager->GetMessageLoop(), params) {
+ fake_consumer_(manager->GetWorkerLoop(), params) {
}
FakeAudioOutputStream::~FakeAudioOutputStream() {
@@ -60,7 +60,7 @@ void FakeAudioOutputStream::GetVolume(double* volume) {
};
void FakeAudioOutputStream::CallOnMoreData(AudioBus* audio_bus) {
- DCHECK(audio_manager_->GetMessageLoop()->BelongsToCurrentThread());
+ DCHECK(audio_manager_->GetWorkerLoop()->BelongsToCurrentThread());
callback_->OnMoreData(audio_bus, AudioBuffersState());
}
diff --git a/chromium/media/audio/linux/audio_manager_linux.cc b/chromium/media/audio/linux/audio_manager_linux.cc
index 708e4f26840..eaeb2f332b9 100644
--- a/chromium/media/audio/linux/audio_manager_linux.cc
+++ b/chromium/media/audio/linux/audio_manager_linux.cc
@@ -2,52 +2,23 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/audio/linux/audio_manager_linux.h"
-
#include "base/command_line.h"
-#include "base/environment.h"
-#include "base/files/file_path.h"
-#include "base/logging.h"
#include "base/metrics/histogram.h"
-#include "base/nix/xdg_util.h"
-#include "base/process/launch.h"
-#include "base/stl_util.h"
-#include "media/audio/audio_output_dispatcher.h"
-#include "media/audio/audio_parameters.h"
-#include "media/audio/audio_util.h"
+#if defined(USE_ALSA)
+#include "media/audio/alsa/audio_manager_alsa.h"
+#else
+#include "media/audio/fake_audio_manager.h"
+#endif
#if defined(USE_CRAS)
#include "media/audio/cras/audio_manager_cras.h"
#endif
-#include "media/audio/linux/alsa_input.h"
-#include "media/audio/linux/alsa_output.h"
-#include "media/audio/linux/alsa_wrapper.h"
#if defined(USE_PULSEAUDIO)
#include "media/audio/pulse/audio_manager_pulse.h"
#endif
-#include "media/base/channel_layout.h"
-#include "media/base/limits.h"
#include "media/base/media_switches.h"
namespace media {
-// Maximum number of output streams that can be open simultaneously.
-static const int kMaxOutputStreams = 50;
-
-// Default sample rate for input and output streams.
-static const int kDefaultSampleRate = 48000;
-
-// Since "default", "pulse" and "dmix" devices are virtual devices mapped to
-// real devices, we remove them from the list to avoiding duplicate counting.
-// In addition, note that we support no more than 2 channels for recording,
-// hence surround devices are not stored in the list.
-static const char* kInvalidAudioInputDevices[] = {
- "default",
- "dmix",
- "null",
- "pulse",
- "surround",
-};
-
enum LinuxAudioIO {
kPulse,
kAlsa,
@@ -55,335 +26,28 @@ enum LinuxAudioIO {
kAudioIOMax // Must always be last!
};
-// static
-void AudioManagerLinux::ShowLinuxAudioInputSettings() {
- scoped_ptr<base::Environment> env(base::Environment::Create());
- CommandLine command_line(CommandLine::NO_PROGRAM);
- switch (base::nix::GetDesktopEnvironment(env.get())) {
- case base::nix::DESKTOP_ENVIRONMENT_GNOME:
- command_line.SetProgram(base::FilePath("gnome-volume-control"));
- break;
- case base::nix::DESKTOP_ENVIRONMENT_KDE3:
- case base::nix::DESKTOP_ENVIRONMENT_KDE4:
- command_line.SetProgram(base::FilePath("kmix"));
- break;
- case base::nix::DESKTOP_ENVIRONMENT_UNITY:
- command_line.SetProgram(base::FilePath("gnome-control-center"));
- command_line.AppendArg("sound");
- command_line.AppendArg("input");
- break;
- default:
- LOG(ERROR) << "Failed to show audio input settings: we don't know "
- << "what command to use for your desktop environment.";
- return;
- }
- base::LaunchProcess(command_line, base::LaunchOptions(), NULL);
-}
-
-// Implementation of AudioManager.
-bool AudioManagerLinux::HasAudioOutputDevices() {
- return HasAnyAlsaAudioDevice(kStreamPlayback);
-}
-
-bool AudioManagerLinux::HasAudioInputDevices() {
- return HasAnyAlsaAudioDevice(kStreamCapture);
-}
-
-AudioManagerLinux::AudioManagerLinux()
- : wrapper_(new AlsaWrapper()) {
- SetMaxOutputStreamsAllowed(kMaxOutputStreams);
-}
-
-AudioManagerLinux::~AudioManagerLinux() {
- Shutdown();
-}
-
-void AudioManagerLinux::ShowAudioInputSettings() {
- ShowLinuxAudioInputSettings();
-}
-
-void AudioManagerLinux::GetAudioInputDeviceNames(
- AudioDeviceNames* device_names) {
- DCHECK(device_names->empty());
- GetAlsaAudioDevices(kStreamCapture, device_names);
-}
-
-void AudioManagerLinux::GetAudioOutputDeviceNames(
- AudioDeviceNames* device_names) {
- DCHECK(device_names->empty());
- GetAlsaAudioDevices(kStreamPlayback, device_names);
-}
-
-AudioParameters AudioManagerLinux::GetInputStreamParameters(
- const std::string& device_id) {
- static const int kDefaultInputBufferSize = 1024;
-
- return AudioParameters(
- AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_STEREO,
- kDefaultSampleRate, 16, kDefaultInputBufferSize);
-}
-
-void AudioManagerLinux::GetAlsaAudioDevices(
- StreamType type,
- media::AudioDeviceNames* device_names) {
- // Constants specified by the ALSA API for device hints.
- static const char kPcmInterfaceName[] = "pcm";
- int card = -1;
-
- // Loop through the sound cards to get ALSA device hints.
- while (!wrapper_->CardNext(&card) && card >= 0) {
- void** hints = NULL;
- int error = wrapper_->DeviceNameHint(card, kPcmInterfaceName, &hints);
- if (!error) {
- GetAlsaDevicesInfo(type, hints, device_names);
-
- // Destroy the hints now that we're done with it.
- wrapper_->DeviceNameFreeHint(hints);
- } else {
- DLOG(WARNING) << "GetAlsaAudioDevices: unable to get device hints: "
- << wrapper_->StrError(error);
- }
- }
-}
-
-void AudioManagerLinux::GetAlsaDevicesInfo(
- AudioManagerLinux::StreamType type,
- void** hints,
- media::AudioDeviceNames* device_names) {
- static const char kIoHintName[] = "IOID";
- static const char kNameHintName[] = "NAME";
- static const char kDescriptionHintName[] = "DESC";
-
- const char* unwanted_device_type = UnwantedDeviceTypeWhenEnumerating(type);
-
- for (void** hint_iter = hints; *hint_iter != NULL; hint_iter++) {
- // Only examine devices of the right type. Valid values are
- // "Input", "Output", and NULL which means both input and output.
- scoped_ptr_malloc<char> io(wrapper_->DeviceNameGetHint(*hint_iter,
- kIoHintName));
- if (io != NULL && strcmp(unwanted_device_type, io.get()) == 0)
- continue;
-
- // Found a device, prepend the default device since we always want
- // it to be on the top of the list for all platforms. And there is
- // no duplicate counting here since it is only done if the list is
- // still empty. Note, pulse has exclusively opened the default
- // device, so we must open the device via the "default" moniker.
- if (device_names->empty()) {
- device_names->push_front(media::AudioDeviceName(
- AudioManagerBase::kDefaultDeviceName,
- AudioManagerBase::kDefaultDeviceId));
- }
-
- // Get the unique device name for the device.
- scoped_ptr_malloc<char> unique_device_name(
- wrapper_->DeviceNameGetHint(*hint_iter, kNameHintName));
-
- // Find out if the device is available.
- if (IsAlsaDeviceAvailable(type, unique_device_name.get())) {
- // Get the description for the device.
- scoped_ptr_malloc<char> desc(wrapper_->DeviceNameGetHint(
- *hint_iter, kDescriptionHintName));
-
- media::AudioDeviceName name;
- name.unique_id = unique_device_name.get();
- if (desc) {
- // Use the more user friendly description as name.
- // Replace '\n' with '-'.
- char* pret = strchr(desc.get(), '\n');
- if (pret)
- *pret = '-';
- name.device_name = desc.get();
- } else {
- // Virtual devices don't necessarily have descriptions.
- // Use their names instead.
- name.device_name = unique_device_name.get();
- }
-
- // Store the device information.
- device_names->push_back(name);
- }
- }
-}
-
-// static
-bool AudioManagerLinux::IsAlsaDeviceAvailable(
- AudioManagerLinux::StreamType type,
- const char* device_name) {
- if (!device_name)
- return false;
-
- // We do prefix matches on the device name to see whether to include
- // it or not.
- if (type == kStreamCapture) {
- // Check if the device is in the list of invalid devices.
- for (size_t i = 0; i < arraysize(kInvalidAudioInputDevices); ++i) {
- if (strncmp(kInvalidAudioInputDevices[i], device_name,
- strlen(kInvalidAudioInputDevices[i])) == 0)
- return false;
- }
- return true;
- } else {
- DCHECK_EQ(kStreamPlayback, type);
- // We prefer the device type that maps straight to hardware but
- // goes through software conversion if needed (e.g. incompatible
- // sample rate).
- // TODO(joi): Should we prefer "hw" instead?
- static const char kDeviceTypeDesired[] = "plughw";
- return strncmp(kDeviceTypeDesired,
- device_name,
- arraysize(kDeviceTypeDesired) - 1) == 0;
- }
-}
-
-// static
-const char* AudioManagerLinux::UnwantedDeviceTypeWhenEnumerating(
- AudioManagerLinux::StreamType wanted_type) {
- return wanted_type == kStreamPlayback ? "Input" : "Output";
-}
-
-bool AudioManagerLinux::HasAnyAlsaAudioDevice(
- AudioManagerLinux::StreamType stream) {
- static const char kPcmInterfaceName[] = "pcm";
- static const char kIoHintName[] = "IOID";
- void** hints = NULL;
- bool has_device = false;
- int card = -1;
-
- // Loop through the sound cards.
- // Don't use snd_device_name_hint(-1,..) since there is a access violation
- // inside this ALSA API with libasound.so.2.0.0.
- while (!wrapper_->CardNext(&card) && (card >= 0) && !has_device) {
- int error = wrapper_->DeviceNameHint(card, kPcmInterfaceName, &hints);
- if (!error) {
- for (void** hint_iter = hints; *hint_iter != NULL; hint_iter++) {
- // Only examine devices that are |stream| capable. Valid values are
- // "Input", "Output", and NULL which means both input and output.
- scoped_ptr_malloc<char> io(wrapper_->DeviceNameGetHint(*hint_iter,
- kIoHintName));
- const char* unwanted_type = UnwantedDeviceTypeWhenEnumerating(stream);
- if (io != NULL && strcmp(unwanted_type, io.get()) == 0)
- continue; // Wrong type, skip the device.
-
- // Found an input device.
- has_device = true;
- break;
- }
-
- // Destroy the hints now that we're done with it.
- wrapper_->DeviceNameFreeHint(hints);
- hints = NULL;
- } else {
- DLOG(WARNING) << "HasAnyAudioDevice: unable to get device hints: "
- << wrapper_->StrError(error);
- }
- }
-
- return has_device;
-}
-
-AudioOutputStream* AudioManagerLinux::MakeLinearOutputStream(
- const AudioParameters& params) {
- DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format());
- return MakeOutputStream(params);
-}
-
-AudioOutputStream* AudioManagerLinux::MakeLowLatencyOutputStream(
- const AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) {
- DLOG_IF(ERROR, !device_id.empty()) << "Not implemented!";
- DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
- // TODO(xians): Use input_device_id for unified IO.
- return MakeOutputStream(params);
-}
-
-AudioInputStream* AudioManagerLinux::MakeLinearInputStream(
- const AudioParameters& params, const std::string& device_id) {
- DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format());
- return MakeInputStream(params, device_id);
-}
-
-AudioInputStream* AudioManagerLinux::MakeLowLatencyInputStream(
- const AudioParameters& params, const std::string& device_id) {
- DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
- return MakeInputStream(params, device_id);
-}
-
-AudioParameters AudioManagerLinux::GetPreferredOutputStreamParameters(
- const std::string& output_device_id,
- const AudioParameters& input_params) {
- // TODO(tommi): Support |output_device_id|.
- DLOG_IF(ERROR, !output_device_id.empty()) << "Not implemented!";
- static const int kDefaultOutputBufferSize = 2048;
- ChannelLayout channel_layout = CHANNEL_LAYOUT_STEREO;
- int sample_rate = kDefaultSampleRate;
- int buffer_size = kDefaultOutputBufferSize;
- int bits_per_sample = 16;
- int input_channels = 0;
- if (input_params.IsValid()) {
- // Some clients, such as WebRTC, have a more limited use case and work
- // acceptably with a smaller buffer size. The check below allows clients
- // which want to try a smaller buffer size on Linux to do so.
- // TODO(dalecurtis): This should include bits per channel and channel layout
- // eventually.
- sample_rate = input_params.sample_rate();
- bits_per_sample = input_params.bits_per_sample();
- channel_layout = input_params.channel_layout();
- input_channels = input_params.input_channels();
- buffer_size = std::min(input_params.frames_per_buffer(), buffer_size);
- }
-
- int user_buffer_size = GetUserBufferSize();
- if (user_buffer_size)
- buffer_size = user_buffer_size;
-
- return AudioParameters(
- AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout, input_channels,
- sample_rate, bits_per_sample, buffer_size);
-}
-
-AudioOutputStream* AudioManagerLinux::MakeOutputStream(
- const AudioParameters& params) {
- std::string device_name = AlsaPcmOutputStream::kAutoSelectDevice;
- if (CommandLine::ForCurrentProcess()->HasSwitch(
- switches::kAlsaOutputDevice)) {
- device_name = CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
- switches::kAlsaOutputDevice);
- }
- return new AlsaPcmOutputStream(device_name, params, wrapper_.get(), this);
-}
-
-AudioInputStream* AudioManagerLinux::MakeInputStream(
- const AudioParameters& params, const std::string& device_id) {
- std::string device_name = (device_id == AudioManagerBase::kDefaultDeviceId) ?
- AlsaPcmInputStream::kAutoSelectDevice : device_id;
- if (CommandLine::ForCurrentProcess()->HasSwitch(switches::kAlsaInputDevice)) {
- device_name = CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
- switches::kAlsaInputDevice);
- }
-
- return new AlsaPcmInputStream(this, device_name, params, wrapper_.get());
-}
-
-AudioManager* CreateAudioManager() {
+AudioManager* CreateAudioManager(AudioLogFactory* audio_log_factory) {
#if defined(USE_CRAS)
if (CommandLine::ForCurrentProcess()->HasSwitch(switches::kUseCras)) {
UMA_HISTOGRAM_ENUMERATION("Media.LinuxAudioIO", kCras, kAudioIOMax);
- return new AudioManagerCras();
+ return new AudioManagerCras(audio_log_factory);
}
#endif
#if defined(USE_PULSEAUDIO)
- AudioManager* manager = AudioManagerPulse::Create();
+ AudioManager* manager = AudioManagerPulse::Create(audio_log_factory);
if (manager) {
UMA_HISTOGRAM_ENUMERATION("Media.LinuxAudioIO", kPulse, kAudioIOMax);
return manager;
}
#endif
+#if defined(USE_ALSA)
UMA_HISTOGRAM_ENUMERATION("Media.LinuxAudioIO", kAlsa, kAudioIOMax);
- return new AudioManagerLinux();
+ return new AudioManagerAlsa(audio_log_factory);
+#else
+ return new FakeAudioManager(audio_log_factory);
+#endif
}
} // namespace media
diff --git a/chromium/media/audio/mac/audio_auhal_mac.cc b/chromium/media/audio/mac/audio_auhal_mac.cc
index 051b709c31d..9fcd46a6a95 100644
--- a/chromium/media/audio/mac/audio_auhal_mac.cc
+++ b/chromium/media/audio/mac/audio_auhal_mac.cc
@@ -7,29 +7,14 @@
#include <CoreServices/CoreServices.h>
#include "base/basictypes.h"
-#include "base/command_line.h"
#include "base/debug/trace_event.h"
#include "base/logging.h"
#include "base/mac/mac_logging.h"
-#include "media/audio/audio_util.h"
#include "media/audio/mac/audio_manager_mac.h"
-#include "media/base/media_switches.h"
+#include "media/base/audio_pull_fifo.h"
namespace media {
-static std::ostream& operator<<(std::ostream& os,
- const AudioStreamBasicDescription& format) {
- os << "sample rate : " << format.mSampleRate << std::endl
- << "format ID : " << format.mFormatID << std::endl
- << "format flags : " << format.mFormatFlags << std::endl
- << "bytes per packet : " << format.mBytesPerPacket << std::endl
- << "frames per packet : " << format.mFramesPerPacket << std::endl
- << "bytes per frame : " << format.mBytesPerFrame << std::endl
- << "channels per frame: " << format.mChannelsPerFrame << std::endl
- << "bits per channel : " << format.mBitsPerChannel;
- return os;
-}
-
static void ZeroBufferList(AudioBufferList* buffer_list) {
for (size_t i = 0; i < buffer_list->mNumberBuffers; ++i) {
memset(buffer_list->mBuffers[i].mData,
@@ -45,7 +30,7 @@ static void WrapBufferList(AudioBufferList* buffer_list,
DCHECK(bus);
const int channels = bus->channels();
const int buffer_list_channels = buffer_list->mNumberBuffers;
- DCHECK_EQ(channels, buffer_list_channels);
+ CHECK_EQ(channels, buffer_list_channels);
// Copy pointers from AudioBufferList.
for (int i = 0; i < channels; ++i) {
@@ -72,8 +57,8 @@ AUHALStream::AUHALStream(
volume_(1),
hardware_latency_frames_(0),
stopped_(false),
- notified_for_possible_device_change_(false),
- input_buffer_list_(NULL) {
+ input_buffer_list_(NULL),
+ current_hardware_pending_bytes_(0) {
// We must have a manager.
DCHECK(manager_);
@@ -143,8 +128,12 @@ void AUHALStream::Close() {
}
if (audio_unit_) {
- AudioUnitUninitialize(audio_unit_);
- AudioComponentInstanceDispose(audio_unit_);
+ OSStatus result = AudioUnitUninitialize(audio_unit_);
+ OSSTATUS_DLOG_IF(ERROR, result != noErr, result)
+ << "AudioUnitUninitialize() failed.";
+ result = AudioComponentInstanceDispose(audio_unit_);
+ OSSTATUS_DLOG_IF(ERROR, result != noErr, result)
+ << "AudioComponentInstanceDispose() failed.";
}
// Inform the audio manager that we have been closed. This will cause our
@@ -160,20 +149,30 @@ void AUHALStream::Start(AudioSourceCallback* callback) {
}
stopped_ = false;
- notified_for_possible_device_change_ = false;
+ audio_fifo_.reset();
{
base::AutoLock auto_lock(source_lock_);
source_ = callback;
}
- AudioOutputUnitStart(audio_unit_);
+ OSStatus result = AudioOutputUnitStart(audio_unit_);
+ if (result == noErr)
+ return;
+
+ Stop();
+ OSSTATUS_DLOG(ERROR, result) << "AudioOutputUnitStart() failed.";
+ callback->OnError(this);
}
void AUHALStream::Stop() {
if (stopped_)
return;
- AudioOutputUnitStop(audio_unit_);
+ OSStatus result = AudioOutputUnitStop(audio_unit_);
+ OSSTATUS_DLOG_IF(ERROR, result != noErr, result)
+ << "AudioOutputUnitStop() failed.";
+ if (result != noErr)
+ source_->OnError(this);
base::AutoLock auto_lock(source_lock_);
source_ = NULL;
@@ -200,73 +199,69 @@ OSStatus AUHALStream::Render(
AudioBufferList* io_data) {
TRACE_EVENT0("audio", "AUHALStream::Render");
+ // If the stream parameters change for any reason, we need to insert a FIFO
+ // since the OnMoreData() pipeline can't handle frame size changes. Generally
+ // this is a temporary situation which can occur after a device change has
+ // occurred but the AudioManager hasn't received the notification yet.
if (number_of_frames != number_of_frames_) {
- // This can happen if we've suddenly changed sample-rates.
- // The stream should be stopping very soon.
- //
- // Unfortunately AUAudioInputStream and AUHALStream share the frame
- // size set by kAudioDevicePropertyBufferFrameSize above on a per process
- // basis. What this means is that the |number_of_frames| value may be
- // larger or smaller than the value set during ConfigureAUHAL().
- // In this case either audio input or audio output will be broken,
- // so just output silence.
- ZeroBufferList(io_data);
-
- // In case we missed a device notification, notify the AudioManager that the
- // device has changed. HandleDeviceChanges() will check to make sure the
- // device has actually changed before taking any action.
- if (!notified_for_possible_device_change_) {
- notified_for_possible_device_change_ = true;
- manager_->GetMessageLoop()->PostTask(FROM_HERE, base::Bind(
- &AudioManagerMac::HandleDeviceChanges, base::Unretained(manager_)));
+ // Create a FIFO on the fly to handle any discrepancies in callback rates.
+ if (!audio_fifo_) {
+ VLOG(1) << "Audio frame size change detected; adding FIFO to compensate.";
+ audio_fifo_.reset(new AudioPullFifo(
+ output_channels_,
+ number_of_frames_,
+ base::Bind(&AUHALStream::ProvideInput, base::Unretained(this))));
}
- return noErr;
- }
-
- if (input_channels_ > 0 && input_buffer_list_) {
- // Get the input data. |input_buffer_list_| is wrapped
- // to point to the data allocated in |input_bus_|.
- OSStatus result = AudioUnitRender(
- audio_unit_,
- flags,
- output_time_stamp,
- 1,
- number_of_frames,
- input_buffer_list_);
- if (result != noErr)
- ZeroBufferList(input_buffer_list_);
+ // Synchronous IO is not supported in this state.
+ if (input_channels_ > 0)
+ input_bus_->Zero();
+ } else {
+ if (input_channels_ > 0 && input_buffer_list_) {
+ // Get the input data. |input_buffer_list_| is wrapped
+ // to point to the data allocated in |input_bus_|.
+ OSStatus result = AudioUnitRender(audio_unit_,
+ flags,
+ output_time_stamp,
+ 1,
+ number_of_frames,
+ input_buffer_list_);
+ if (result != noErr)
+ ZeroBufferList(input_buffer_list_);
+ }
}
// Make |output_bus_| wrap the output AudioBufferList.
WrapBufferList(io_data, output_bus_.get(), number_of_frames);
// Update the playout latency.
- double playout_latency_frames = GetPlayoutLatency(output_time_stamp);
+ const double playout_latency_frames = GetPlayoutLatency(output_time_stamp);
+ current_hardware_pending_bytes_ = static_cast<uint32>(
+ (playout_latency_frames + 0.5) * params_.GetBytesPerFrame());
- uint32 hardware_pending_bytes = static_cast<uint32>
- ((playout_latency_frames + 0.5) * output_format_.mBytesPerFrame);
+ if (audio_fifo_)
+ audio_fifo_->Consume(output_bus_.get(), output_bus_->frames());
+ else
+ ProvideInput(0, output_bus_.get());
- {
- // Render() shouldn't be called except between AudioOutputUnitStart() and
- // AudioOutputUnitStop() calls, but crash reports have shown otherwise:
- // http://crbug.com/178765. We use |source_lock_| to prevent races and
- // crashes in Render() when |source_| is cleared.
- base::AutoLock auto_lock(source_lock_);
- if (!source_) {
- ZeroBufferList(io_data);
- return noErr;
- }
+ return noErr;
+}
- // Supply the input data and render the output data.
- source_->OnMoreIOData(
- input_bus_.get(),
- output_bus_.get(),
- AudioBuffersState(0, hardware_pending_bytes));
- output_bus_->Scale(volume_);
+void AUHALStream::ProvideInput(int frame_delay, AudioBus* dest) {
+ base::AutoLock auto_lock(source_lock_);
+ if (!source_) {
+ dest->Zero();
+ return;
}
- return noErr;
+ // Supply the input data and render the output data.
+ source_->OnMoreIOData(
+ input_bus_.get(),
+ dest,
+ AudioBuffersState(0,
+ current_hardware_pending_bytes_ +
+ frame_delay * params_.GetBytesPerFrame()));
+ dest->Scale(volume_);
}
// AUHAL callback.
@@ -453,7 +448,7 @@ bool AUHALStream::ConfigureAUHAL() {
OSStatus result = AudioComponentInstanceNew(comp, &audio_unit_);
if (result != noErr) {
- OSSTATUS_DLOG(WARNING, result) << "AudioComponentInstanceNew() failed.";
+ OSSTATUS_DLOG(ERROR, result) << "AudioComponentInstanceNew() failed.";
return false;
}
@@ -511,7 +506,7 @@ bool AUHALStream::ConfigureAUHAL() {
&buffer_size,
sizeof(buffer_size));
if (result != noErr) {
- OSSTATUS_DLOG(WARNING, result)
+ OSSTATUS_DLOG(ERROR, result)
<< "AudioUnitSetProperty(kAudioDevicePropertyBufferFrameSize) failed.";
return false;
}
@@ -532,7 +527,7 @@ bool AUHALStream::ConfigureAUHAL() {
result = AudioUnitInitialize(audio_unit_);
if (result != noErr) {
- OSSTATUS_DLOG(WARNING, result) << "AudioUnitInitialize() failed.";
+ OSSTATUS_DLOG(ERROR, result) << "AudioUnitInitialize() failed.";
return false;
}
diff --git a/chromium/media/audio/mac/audio_auhal_mac.h b/chromium/media/audio/mac/audio_auhal_mac.h
index 66feb8d0d11..b488b73c0d1 100644
--- a/chromium/media/audio/mac/audio_auhal_mac.h
+++ b/chromium/media/audio/mac/audio_auhal_mac.h
@@ -28,6 +28,7 @@
namespace media {
class AudioManagerMac;
+class AudioPullFifo;
// Implementation of AudioOuputStream for Mac OS X using the
// AUHAL Audio Unit present in OS 10.4 and later.
@@ -83,6 +84,9 @@ class AUHALStream : public AudioOutputStream {
UInt32 number_of_frames,
AudioBufferList* io_data);
+ // Called by either |audio_fifo_| or Render() to provide audio data.
+ void ProvideInput(int frame_delay, AudioBus* dest);
+
// Helper method to enable input and output.
bool EnableIO(bool enable, UInt32 scope);
@@ -108,15 +112,15 @@ class AUHALStream : public AudioOutputStream {
double GetPlayoutLatency(const AudioTimeStamp* output_time_stamp);
// Our creator, the audio manager needs to be notified when we close.
- AudioManagerMac* manager_;
+ AudioManagerMac* const manager_;
- AudioParameters params_;
+ const AudioParameters params_;
// For convenience - same as in params_.
- int input_channels_;
- int output_channels_;
+ const int input_channels_;
+ const int output_channels_;
// Buffer-size.
- size_t number_of_frames_;
+ const size_t number_of_frames_;
// Pointer to the object that will provide the audio samples.
AudioSourceCallback* source_;
@@ -131,7 +135,7 @@ class AUHALStream : public AudioOutputStream {
// The audio device to use with the AUHAL.
// We can potentially handle both input and output with this device.
- AudioDeviceID device_;
+ const AudioDeviceID device_;
// The AUHAL Audio Unit which talks to |device_|.
AudioUnit audio_unit_;
@@ -145,10 +149,6 @@ class AUHALStream : public AudioOutputStream {
// The flag used to stop the streaming.
bool stopped_;
- // The flag used to indicate if the AudioManager has been notified of a
- // potential device change. Reset to false during Start().
- bool notified_for_possible_device_change_;
-
// The input AudioUnit renders its data here.
scoped_ptr<uint8[]> input_buffer_list_storage_;
AudioBufferList* input_buffer_list_;
@@ -159,6 +159,13 @@ class AUHALStream : public AudioOutputStream {
// Container for retrieving data from AudioSourceCallback::OnMoreIOData().
scoped_ptr<AudioBus> output_bus_;
+ // Dynamically allocated FIFO used when CoreAudio asks for unexpected frame
+ // sizes.
+ scoped_ptr<AudioPullFifo> audio_fifo_;
+
+ // Current buffer delay. Set by Render().
+ uint32 current_hardware_pending_bytes_;
+
DISALLOW_COPY_AND_ASSIGN(AUHALStream);
};
diff --git a/chromium/media/audio/mac/audio_auhal_mac_unittest.cc b/chromium/media/audio/mac/audio_auhal_mac_unittest.cc
index 9b699ff10f8..d709554dfaf 100644
--- a/chromium/media/audio/mac/audio_auhal_mac_unittest.cc
+++ b/chromium/media/audio/mac/audio_auhal_mac_unittest.cc
@@ -42,7 +42,7 @@ class MockAudioSourceCallback : public AudioOutputStream::AudioSourceCallback {
class AudioOutputStreamWrapper {
public:
explicit AudioOutputStreamWrapper()
- : audio_man_(AudioManager::Create()),
+ : audio_man_(AudioManager::CreateForTesting()),
format_(AudioParameters::AUDIO_PCM_LOW_LATENCY),
bits_per_sample_(kBitsPerSample) {
AudioParameters preferred_params =
diff --git a/chromium/media/audio/mac/audio_input_mac.cc b/chromium/media/audio/mac/audio_input_mac.cc
index 7930567fd9c..4aee1179cfa 100644
--- a/chromium/media/audio/mac/audio_input_mac.cc
+++ b/chromium/media/audio/mac/audio_input_mac.cc
@@ -10,7 +10,6 @@
#include "base/logging.h"
#include "base/mac/mac_logging.h"
#include "media/audio/audio_manager_base.h"
-#include "media/audio/audio_util.h"
namespace media {
diff --git a/chromium/media/audio/mac/audio_low_latency_input_mac.cc b/chromium/media/audio/mac/audio_low_latency_input_mac.cc
index d97f453ca99..dbc75bfea31 100644
--- a/chromium/media/audio/mac/audio_low_latency_input_mac.cc
+++ b/chromium/media/audio/mac/audio_low_latency_input_mac.cc
@@ -9,14 +9,11 @@
#include "base/basictypes.h"
#include "base/logging.h"
#include "base/mac/mac_logging.h"
-#include "media/audio/audio_util.h"
#include "media/audio/mac/audio_manager_mac.h"
#include "media/base/data_buffer.h"
namespace media {
-static const int kMinIntervalBetweenVolumeUpdatesMs = 1000;
-
static std::ostream& operator<<(std::ostream& os,
const AudioStreamBasicDescription& format) {
os << "sample rate : " << format.mSampleRate << std::endl
@@ -104,7 +101,7 @@ AUAudioInputStream::AUAudioInputStream(
requested_size_bytes_ = requested_size_frames * format_.mBytesPerFrame;
DVLOG(1) << "Requested buffer size in bytes : " << requested_size_bytes_;
- DLOG_IF(INFO, requested_size_frames > number_of_frames_) << "FIFO is used";
+ DVLOG_IF(0, requested_size_frames > number_of_frames_) << "FIFO is used";
const int number_of_bytes = number_of_frames_ * format_.mBytesPerFrame;
fifo_delay_bytes_ = requested_size_bytes_ - number_of_bytes;
diff --git a/chromium/media/audio/mac/audio_low_latency_input_mac_unittest.cc b/chromium/media/audio/mac/audio_low_latency_input_mac_unittest.cc
index 9b5985117d7..9360befe575 100644
--- a/chromium/media/audio/mac/audio_low_latency_input_mac_unittest.cc
+++ b/chromium/media/audio/mac/audio_low_latency_input_mac_unittest.cc
@@ -95,7 +95,7 @@ class WriteToFileAudioSink : public AudioInputStream::AudioInputCallback {
class MacAudioInputTest : public testing::Test {
protected:
- MacAudioInputTest() : audio_manager_(AudioManager::Create()) {}
+ MacAudioInputTest() : audio_manager_(AudioManager::CreateForTesting()) {}
virtual ~MacAudioInputTest() {}
// Convenience method which ensures that we are not running on the build
diff --git a/chromium/media/audio/mac/audio_manager_mac.cc b/chromium/media/audio/mac/audio_manager_mac.cc
index 8e4b969854e..be7dddd5bb6 100644
--- a/chromium/media/audio/mac/audio_manager_mac.cc
+++ b/chromium/media/audio/mac/audio_manager_mac.cc
@@ -13,7 +13,6 @@
#include "base/mac/scoped_cftyperef.h"
#include "base/strings/sys_string_conversions.h"
#include "media/audio/audio_parameters.h"
-#include "media/audio/audio_util.h"
#include "media/audio/mac/audio_auhal_mac.h"
#include "media/audio/mac/audio_input_mac.h"
#include "media/audio/mac/audio_low_latency_input_mac.h"
@@ -36,23 +35,6 @@ static const int kDefaultLowLatencyBufferSize = 128;
// Default sample-rate on most Apple hardware.
static const int kFallbackSampleRate = 44100;
-static int ChooseBufferSize(int output_sample_rate) {
- int buffer_size = kDefaultLowLatencyBufferSize;
- const int user_buffer_size = GetUserBufferSize();
- if (user_buffer_size) {
- buffer_size = user_buffer_size;
- } else if (output_sample_rate > 48000) {
- // The default buffer size is too small for higher sample rates and may lead
- // to glitching. Adjust upwards by multiples of the default size.
- if (output_sample_rate <= 96000)
- buffer_size = 2 * kDefaultLowLatencyBufferSize;
- else if (output_sample_rate <= 192000)
- buffer_size = 4 * kDefaultLowLatencyBufferSize;
- }
-
- return buffer_size;
-}
-
static bool HasAudioHardware(AudioObjectPropertySelector selector) {
AudioDeviceID output_device_id = kAudioObjectUnknown;
const AudioObjectPropertyAddress property_address = {
@@ -238,8 +220,9 @@ static AudioDeviceID GetAudioDeviceIdByUId(bool is_input,
return audio_device_id;
}
-AudioManagerMac::AudioManagerMac()
- : current_sample_rate_(0) {
+AudioManagerMac::AudioManagerMac(AudioLogFactory* audio_log_factory)
+ : AudioManagerBase(audio_log_factory),
+ current_sample_rate_(0) {
current_output_device_ = kAudioDeviceUnknown;
SetMaxOutputStreamsAllowed(kMaxOutputStreams);
@@ -474,6 +457,7 @@ std::string AudioManagerMac::GetAssociatedOutputDeviceID(
if (result)
return std::string();
+ std::vector<std::string> associated_devices;
for (int i = 0; i < device_count; ++i) {
// Get the number of output channels of the device.
pa.mSelector = kAudioDevicePropertyStreams;
@@ -501,10 +485,30 @@ std::string AudioManagerMac::GetAssociatedOutputDeviceID(
std::string ret(base::SysCFStringRefToUTF8(uid));
CFRelease(uid);
- return ret;
+ associated_devices.push_back(ret);
}
// No matching device found.
+ if (associated_devices.empty())
+ return std::string();
+
+ // Return the device if there is only one associated device.
+ if (associated_devices.size() == 1)
+ return associated_devices[0];
+
+ // When there are multiple associated devices, we currently do not have a way
+ // to detect if a device (e.g. a digital output device) is actually connected
+ // to an endpoint, so we cannot randomly pick a device.
+ // We pick the device iff the associated device is the default output device.
+ const std::string default_device = GetDefaultOutputDeviceID();
+ for (std::vector<std::string>::const_iterator iter =
+ associated_devices.begin();
+ iter != associated_devices.end(); ++iter) {
+ if (default_device == *iter)
+ return *iter;
+ }
+
+  // Failed to figure out which is the matching device, return an empty string.
return std::string();
}
@@ -542,7 +546,7 @@ AudioOutputStream* AudioManagerMac::MakeLowLatencyOutputStream(
// For I/O, the simplest case is when the default input and output
// devices are the same.
GetDefaultOutputDevice(&device);
- LOG(INFO) << "UNIFIED: default input and output devices are identical";
+ VLOG(0) << "UNIFIED: default input and output devices are identical";
} else {
// Some audio hardware is presented as separate input and output devices
// even though they are really the same physical hardware and
@@ -555,7 +559,7 @@ AudioOutputStream* AudioManagerMac::MakeLowLatencyOutputStream(
// so we get the lowest latency and use fewer threads.
device = aggregate_device_manager_.GetDefaultAggregateDevice();
if (device != kAudioObjectUnknown)
- LOG(INFO) << "Using AGGREGATE audio device";
+ VLOG(0) << "Using AGGREGATE audio device";
}
if (device != kAudioObjectUnknown &&
@@ -670,16 +674,20 @@ AudioParameters AudioManagerMac::GetPreferredOutputStreamParameters(
}
}
+ if (channel_layout == CHANNEL_LAYOUT_UNSUPPORTED)
+ channel_layout = CHANNEL_LAYOUT_DISCRETE;
+ else
+ hardware_channels = ChannelLayoutToChannelCount(channel_layout);
+
AudioParameters params(
AudioParameters::AUDIO_PCM_LOW_LATENCY,
channel_layout,
+ hardware_channels,
input_channels,
hardware_sample_rate,
16,
- buffer_size);
-
- if (channel_layout == CHANNEL_LAYOUT_UNSUPPORTED)
- params.SetDiscreteChannels(hardware_channels);
+ buffer_size,
+ AudioParameters::NO_EFFECTS);
return params;
}
@@ -722,8 +730,25 @@ void AudioManagerMac::HandleDeviceChanges() {
NotifyAllOutputDeviceChangeListeners();
}
-AudioManager* CreateAudioManager() {
- return new AudioManagerMac();
+int AudioManagerMac::ChooseBufferSize(int output_sample_rate) {
+ int buffer_size = kDefaultLowLatencyBufferSize;
+ const int user_buffer_size = GetUserBufferSize();
+ if (user_buffer_size) {
+ buffer_size = user_buffer_size;
+ } else if (output_sample_rate > 48000) {
+ // The default buffer size is too small for higher sample rates and may lead
+ // to glitching. Adjust upwards by multiples of the default size.
+ if (output_sample_rate <= 96000)
+ buffer_size = 2 * kDefaultLowLatencyBufferSize;
+ else if (output_sample_rate <= 192000)
+ buffer_size = 4 * kDefaultLowLatencyBufferSize;
+ }
+
+ return buffer_size;
+}
+
+AudioManager* CreateAudioManager(AudioLogFactory* audio_log_factory) {
+ return new AudioManagerMac(audio_log_factory);
}
} // namespace media
diff --git a/chromium/media/audio/mac/audio_manager_mac.h b/chromium/media/audio/mac/audio_manager_mac.h
index d162554b405..fb521c940de 100644
--- a/chromium/media/audio/mac/audio_manager_mac.h
+++ b/chromium/media/audio/mac/audio_manager_mac.h
@@ -22,7 +22,7 @@ namespace media {
// the AudioManager class.
class MEDIA_EXPORT AudioManagerMac : public AudioManagerBase {
public:
- AudioManagerMac();
+ AudioManagerMac(AudioLogFactory* audio_log_factory);
// Implementation of AudioManager.
virtual bool HasAudioOutputDevices() OVERRIDE;
@@ -62,10 +62,6 @@ class MEDIA_EXPORT AudioManagerMac : public AudioManagerBase {
static int HardwareSampleRateForDevice(AudioDeviceID device_id);
static int HardwareSampleRate();
- // Notify streams of a device change if the default output device or its
- // sample rate has changed, otherwise does nothing.
- void HandleDeviceChanges();
-
protected:
virtual ~AudioManagerMac();
@@ -80,6 +76,12 @@ class MEDIA_EXPORT AudioManagerMac : public AudioManagerBase {
void CreateDeviceListener();
void DestroyDeviceListener();
+ int ChooseBufferSize(int output_sample_rate);
+
+ // Notify streams of a device change if the default output device or its
+ // sample rate has changed, otherwise does nothing.
+ void HandleDeviceChanges();
+
scoped_ptr<AudioDeviceListenerMac> output_device_listener_;
// Track the output sample-rate and the default output device
diff --git a/chromium/media/audio/mac/audio_synchronized_mac.cc b/chromium/media/audio/mac/audio_synchronized_mac.cc
index a2484ca67fe..a9bc88e2bd3 100644
--- a/chromium/media/audio/mac/audio_synchronized_mac.cc
+++ b/chromium/media/audio/mac/audio_synchronized_mac.cc
@@ -11,7 +11,6 @@
#include "base/debug/trace_event.h"
#include "base/logging.h"
#include "base/mac/mac_logging.h"
-#include "media/audio/audio_util.h"
#include "media/audio/mac/audio_manager_mac.h"
#include "media/base/channel_mixer.h"
diff --git a/chromium/media/audio/mac/audio_unified_mac.cc b/chromium/media/audio/mac/audio_unified_mac.cc
index 67ec2fe6f3e..d1dc007e6a8 100644
--- a/chromium/media/audio/mac/audio_unified_mac.cc
+++ b/chromium/media/audio/mac/audio_unified_mac.cc
@@ -9,7 +9,6 @@
#include "base/basictypes.h"
#include "base/logging.h"
#include "base/mac/mac_logging.h"
-#include "media/audio/audio_util.h"
#include "media/audio/mac/audio_manager_mac.h"
namespace media {
diff --git a/chromium/media/audio/mock_audio_manager.cc b/chromium/media/audio/mock_audio_manager.cc
index a164332a64a..f2074d65357 100644
--- a/chromium/media/audio/mock_audio_manager.cc
+++ b/chromium/media/audio/mock_audio_manager.cc
@@ -25,8 +25,8 @@ bool MockAudioManager::HasAudioInputDevices() {
return true;
}
-string16 MockAudioManager::GetAudioInputDeviceModel() {
- return string16();
+base::string16 MockAudioManager::GetAudioInputDeviceModel() {
+ return base::string16();
}
void MockAudioManager::ShowAudioInputSettings() {
@@ -34,6 +34,11 @@ void MockAudioManager::ShowAudioInputSettings() {
void MockAudioManager::GetAudioInputDeviceNames(
AudioDeviceNames* device_names) {
+ DCHECK(device_names->empty());
+ device_names->push_back(media::AudioDeviceName("fake_device_name_1",
+ "fake_device_id_1"));
+ device_names->push_back(media::AudioDeviceName("fake_device_name_2",
+ "fake_device_id_2"));
}
void MockAudioManager::GetAudioOutputDeviceNames(
@@ -98,4 +103,11 @@ std::string MockAudioManager::GetAssociatedOutputDeviceID(
return std::string();
}
+scoped_ptr<AudioLog> MockAudioManager::CreateAudioLog(
+ AudioLogFactory::AudioComponent component) {
+ return scoped_ptr<AudioLog>();
+}
+
+void MockAudioManager::FixWedgedAudio() {}
+
} // namespace media.
diff --git a/chromium/media/audio/mock_audio_manager.h b/chromium/media/audio/mock_audio_manager.h
index 7bc30f578e7..2d71fe8493f 100644
--- a/chromium/media/audio/mock_audio_manager.h
+++ b/chromium/media/audio/mock_audio_manager.h
@@ -27,7 +27,7 @@ class MockAudioManager : public media::AudioManager {
virtual bool HasAudioInputDevices() OVERRIDE;
- virtual string16 GetAudioInputDeviceModel() OVERRIDE;
+ virtual base::string16 GetAudioInputDeviceModel() OVERRIDE;
virtual void ShowAudioInputSettings() OVERRIDE;
@@ -67,9 +67,15 @@ class MockAudioManager : public media::AudioManager {
virtual std::string GetAssociatedOutputDeviceID(
const std::string& input_device_id) OVERRIDE;
- private:
+ virtual scoped_ptr<AudioLog> CreateAudioLog(
+ AudioLogFactory::AudioComponent component) OVERRIDE;
+
+ virtual void FixWedgedAudio() OVERRIDE;
+
+ protected:
virtual ~MockAudioManager();
+ private:
scoped_refptr<base::MessageLoopProxy> message_loop_proxy_;
DISALLOW_COPY_AND_ASSIGN(MockAudioManager);
diff --git a/chromium/media/audio/openbsd/audio_manager_openbsd.cc b/chromium/media/audio/openbsd/audio_manager_openbsd.cc
index a97ea8f625e..b378b02d0cd 100644
--- a/chromium/media/audio/openbsd/audio_manager_openbsd.cc
+++ b/chromium/media/audio/openbsd/audio_manager_openbsd.cc
@@ -11,7 +11,6 @@
#include "base/stl_util.h"
#include "media/audio/audio_output_dispatcher.h"
#include "media/audio/audio_parameters.h"
-#include "media/audio/audio_util.h"
#include "media/audio/pulse/pulse_output.h"
#include "media/audio/pulse/pulse_stubs.h"
#include "media/base/channel_layout.h"
@@ -65,8 +64,9 @@ AudioParameters AudioManagerOpenBSD::GetInputStreamParameters(
kDefaultSampleRate, 16, kDefaultInputBufferSize);
}
-AudioManagerOpenBSD::AudioManagerOpenBSD()
- : pulse_library_is_initialized_(false) {
+AudioManagerOpenBSD::AudioManagerOpenBSD(AudioLogFactory* audio_log_factory)
+ : AudioManagerBase(audio_log_factory),
+ pulse_library_is_initialized_(false) {
SetMaxOutputStreamsAllowed(kMaxOutputStreams);
StubPathMap paths;
@@ -139,7 +139,7 @@ AudioParameters AudioManagerOpenBSD::GetPreferredOutputStreamParameters(
return AudioParameters(
AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout, input_channels,
- sample_rate, bits_per_sample, buffer_size);
+ sample_rate, bits_per_sample, buffer_size, AudioParameters::NO_EFFECTS);
}
AudioOutputStream* AudioManagerOpenBSD::MakeOutputStream(
@@ -152,8 +152,8 @@ AudioOutputStream* AudioManagerOpenBSD::MakeOutputStream(
// TODO(xians): Merge AudioManagerOpenBSD with AudioManagerPulse;
// static
-AudioManager* CreateAudioManager() {
- return new AudioManagerOpenBSD();
+AudioManager* CreateAudioManager(AudioLogFactory* audio_log_factory) {
+ return new AudioManagerOpenBSD(audio_log_factory);
}
} // namespace media
diff --git a/chromium/media/audio/openbsd/audio_manager_openbsd.h b/chromium/media/audio/openbsd/audio_manager_openbsd.h
index e4bb3948d28..113f5915ae1 100644
--- a/chromium/media/audio/openbsd/audio_manager_openbsd.h
+++ b/chromium/media/audio/openbsd/audio_manager_openbsd.h
@@ -14,7 +14,7 @@ namespace media {
class MEDIA_EXPORT AudioManagerOpenBSD : public AudioManagerBase {
public:
- AudioManagerOpenBSD();
+ AudioManagerOpenBSD(AudioLogFactory* audio_log_factory);
// Implementation of AudioManager.
virtual bool HasAudioOutputDevices() OVERRIDE;
diff --git a/chromium/media/audio/pulse/audio_manager_pulse.cc b/chromium/media/audio/pulse/audio_manager_pulse.cc
index 5c09f149057..d369d135bef 100644
--- a/chromium/media/audio/pulse/audio_manager_pulse.cc
+++ b/chromium/media/audio/pulse/audio_manager_pulse.cc
@@ -10,9 +10,8 @@
#include "base/logging.h"
#include "base/nix/xdg_util.h"
#include "base/stl_util.h"
+#include "media/audio/alsa/audio_manager_alsa.h"
#include "media/audio/audio_parameters.h"
-#include "media/audio/audio_util.h"
-#include "media/audio/linux/audio_manager_linux.h"
#include "media/audio/pulse/pulse_input.h"
#include "media/audio/pulse/pulse_output.h"
#include "media/audio/pulse/pulse_unified.h"
@@ -39,8 +38,8 @@ static const base::FilePath::CharType kPulseLib[] =
FILE_PATH_LITERAL("libpulse.so.0");
// static
-AudioManager* AudioManagerPulse::Create() {
- scoped_ptr<AudioManagerPulse> ret(new AudioManagerPulse());
+AudioManager* AudioManagerPulse::Create(AudioLogFactory* audio_log_factory) {
+ scoped_ptr<AudioManagerPulse> ret(new AudioManagerPulse(audio_log_factory));
if (ret->Init())
return ret.release();
@@ -48,8 +47,9 @@ AudioManager* AudioManagerPulse::Create() {
return NULL;
}
-AudioManagerPulse::AudioManagerPulse()
- : input_mainloop_(NULL),
+AudioManagerPulse::AudioManagerPulse(AudioLogFactory* audio_log_factory)
+ : AudioManagerBase(audio_log_factory),
+ input_mainloop_(NULL),
input_context_(NULL),
devices_(NULL),
native_input_sample_rate_(0) {
@@ -78,7 +78,7 @@ bool AudioManagerPulse::HasAudioInputDevices() {
}
void AudioManagerPulse::ShowAudioInputSettings() {
- AudioManagerLinux::ShowLinuxAudioInputSettings();
+ AudioManagerAlsa::ShowLinuxAudioInputSettings();
}
void AudioManagerPulse::GetAudioDeviceNames(
@@ -181,7 +181,7 @@ AudioParameters AudioManagerPulse::GetPreferredOutputStreamParameters(
return AudioParameters(
AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout, input_channels,
- sample_rate, bits_per_sample, buffer_size);
+ sample_rate, bits_per_sample, buffer_size, AudioParameters::NO_EFFECTS);
}
AudioOutputStream* AudioManagerPulse::MakeOutputStream(
diff --git a/chromium/media/audio/pulse/audio_manager_pulse.h b/chromium/media/audio/pulse/audio_manager_pulse.h
index 36396639929..45fb8cb56fa 100644
--- a/chromium/media/audio/pulse/audio_manager_pulse.h
+++ b/chromium/media/audio/pulse/audio_manager_pulse.h
@@ -16,10 +16,10 @@ namespace media {
class MEDIA_EXPORT AudioManagerPulse : public AudioManagerBase {
public:
- AudioManagerPulse();
+ AudioManagerPulse(AudioLogFactory* audio_log_factory);
virtual ~AudioManagerPulse();
- static AudioManager* Create();
+ static AudioManager* Create(AudioLogFactory* audio_log_factory);
// Implementation of AudioManager.
virtual bool HasAudioOutputDevices() OVERRIDE;
diff --git a/chromium/media/audio/shared_memory_util.cc b/chromium/media/audio/shared_memory_util.cc
deleted file mode 100644
index 523cdb9646c..00000000000
--- a/chromium/media/audio/shared_memory_util.cc
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/shared_memory_util.h"
-
-#include <algorithm>
-
-#include "base/atomicops.h"
-#include "base/logging.h"
-
-using base::subtle::Atomic32;
-
-static const uint32 kUnknownDataSize = static_cast<uint32>(-1);
-
-namespace media {
-
-uint32 TotalSharedMemorySizeInBytes(uint32 packet_size) {
- // Need to reserve extra 4 bytes for size of data.
- return packet_size + sizeof(Atomic32);
-}
-
-uint32 PacketSizeInBytes(uint32 shared_memory_created_size) {
- return shared_memory_created_size - sizeof(Atomic32);
-}
-
-uint32 GetActualDataSizeInBytes(base::SharedMemory* shared_memory,
- uint32 packet_size) {
- char* ptr = static_cast<char*>(shared_memory->memory()) + packet_size;
- DCHECK_EQ(0u, reinterpret_cast<size_t>(ptr) & 3);
-
- // Actual data size stored at the end of the buffer.
- uint32 actual_data_size =
- base::subtle::Acquire_Load(reinterpret_cast<volatile Atomic32*>(ptr));
- return std::min(actual_data_size, packet_size);
-}
-
-void SetActualDataSizeInBytes(void* shared_memory_ptr,
- uint32 packet_size,
- uint32 actual_data_size) {
- char* ptr = static_cast<char*>(shared_memory_ptr) + packet_size;
- DCHECK_EQ(0u, reinterpret_cast<size_t>(ptr) & 3);
-
- // Set actual data size at the end of the buffer.
- base::subtle::Release_Store(reinterpret_cast<volatile Atomic32*>(ptr),
- actual_data_size);
-}
-
-void SetActualDataSizeInBytes(base::SharedMemory* shared_memory,
- uint32 packet_size,
- uint32 actual_data_size) {
- SetActualDataSizeInBytes(shared_memory->memory(),
- packet_size, actual_data_size);
-}
-
-void SetUnknownDataSize(base::SharedMemory* shared_memory,
- uint32 packet_size) {
- SetActualDataSizeInBytes(shared_memory, packet_size, kUnknownDataSize);
-}
-
-bool IsUnknownDataSize(base::SharedMemory* shared_memory,
- uint32 packet_size) {
- char* ptr = static_cast<char*>(shared_memory->memory()) + packet_size;
- DCHECK_EQ(0u, reinterpret_cast<size_t>(ptr) & 3);
-
- // Actual data size stored at the end of the buffer.
- uint32 actual_data_size =
- base::subtle::Acquire_Load(reinterpret_cast<volatile Atomic32*>(ptr));
- return actual_data_size == kUnknownDataSize;
-}
-
-} // namespace media
diff --git a/chromium/media/audio/shared_memory_util.h b/chromium/media/audio/shared_memory_util.h
deleted file mode 100644
index 9186d5c9529..00000000000
--- a/chromium/media/audio/shared_memory_util.h
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_SHARED_MEMORY_UTIL_H_
-#define MEDIA_AUDIO_SHARED_MEMORY_UTIL_H_
-
-#include "base/basictypes.h"
-#include "base/memory/shared_memory.h"
-#include "media/base/media_export.h"
-
-namespace media {
-
-// Value sent by the controller to the renderer in low-latency mode
-// indicating that the stream is paused.
-enum { kPauseMark = -1 };
-
-// Functions that handle data buffer passed between processes in the shared
-// memory. Called on both IPC sides. These are necessary because the shared
-// memory has a layout: the last word in the block is the data size in bytes.
-
-MEDIA_EXPORT uint32 TotalSharedMemorySizeInBytes(uint32 packet_size);
-MEDIA_EXPORT uint32 PacketSizeInBytes(uint32 shared_memory_created_size);
-MEDIA_EXPORT uint32 GetActualDataSizeInBytes(base::SharedMemory* shared_memory,
- uint32 packet_size);
-MEDIA_EXPORT void SetActualDataSizeInBytes(base::SharedMemory* shared_memory,
- uint32 packet_size,
- uint32 actual_data_size);
-MEDIA_EXPORT void SetActualDataSizeInBytes(void* shared_memory_ptr,
- uint32 packet_size,
- uint32 actual_data_size);
-MEDIA_EXPORT void SetUnknownDataSize(base::SharedMemory* shared_memory,
- uint32 packet_size);
-MEDIA_EXPORT bool IsUnknownDataSize(base::SharedMemory* shared_memory,
- uint32 packet_size);
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_SHARED_MEMORY_UTIL_H_
diff --git a/chromium/media/audio/simple_sources.cc b/chromium/media/audio/simple_sources.cc
index 7aa74d6e5f1..275413a232c 100644
--- a/chromium/media/audio/simple_sources.cc
+++ b/chromium/media/audio/simple_sources.cc
@@ -10,7 +10,6 @@
#include <algorithm>
#include "base/logging.h"
-#include "media/audio/audio_util.h"
namespace media {
diff --git a/chromium/media/audio/sounds/audio_stream_handler.cc b/chromium/media/audio/sounds/audio_stream_handler.cc
new file mode 100644
index 00000000000..08608ac4187
--- /dev/null
+++ b/chromium/media/audio/sounds/audio_stream_handler.cc
@@ -0,0 +1,188 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/sounds/audio_stream_handler.h"
+
+#include <string>
+
+#include "base/logging.h"
+#include "base/message_loop/message_loop_proxy.h"
+#include "media/audio/audio_manager.h"
+#include "media/audio/audio_manager_base.h"
+#include "media/base/channel_layout.h"
+
+namespace media {
+
+namespace {
+
+// Volume percent.
+const double kOutputVolumePercent = 0.8;
+
+// The number of frames each OnMoreData() call will request.
+const int kDefaultFrameCount = 1024;
+
+AudioStreamHandler::TestObserver* g_observer_for_testing = NULL;
+AudioOutputStream::AudioSourceCallback* g_audio_source_for_testing = NULL;
+
+} // namespace
+
+class AudioStreamHandler::AudioStreamContainer
+ : public AudioOutputStream::AudioSourceCallback {
+ public:
+ AudioStreamContainer(const WavAudioHandler& wav_audio,
+ const AudioParameters& params)
+ : stream_(NULL),
+ wav_audio_(wav_audio),
+ params_(params),
+ cursor_(0) {
+ }
+
+ virtual ~AudioStreamContainer() {
+ DCHECK(AudioManager::Get()->GetMessageLoop()->BelongsToCurrentThread());
+ }
+
+ void Play() {
+ DCHECK(AudioManager::Get()->GetMessageLoop()->BelongsToCurrentThread());
+
+ if (!stream_) {
+ stream_ = AudioManager::Get()->MakeAudioOutputStreamProxy(params_,
+ std::string(),
+ std::string());
+ if (!stream_ || !stream_->Open()) {
+ LOG(ERROR) << "Failed to open an output stream.";
+ return;
+ }
+ stream_->SetVolume(kOutputVolumePercent);
+ } else {
+      // TODO(ygorshenin): implement smart stream rewind.
+ stream_->Stop();
+ }
+
+ cursor_ = 0;
+ if (g_audio_source_for_testing)
+ stream_->Start(g_audio_source_for_testing);
+ else
+ stream_->Start(this);
+
+ if (g_observer_for_testing)
+ g_observer_for_testing->OnPlay();
+ }
+
+ void Stop() {
+ DCHECK(AudioManager::Get()->GetMessageLoop()->BelongsToCurrentThread());
+ if (!stream_)
+ return;
+ stream_->Stop();
+ stream_->Close();
+ stream_ = NULL;
+
+ if (g_observer_for_testing)
+ g_observer_for_testing->OnStop(cursor_);
+ }
+
+ private:
+ // AudioOutputStream::AudioSourceCallback overrides:
+ // Following methods could be called from *ANY* thread.
+ virtual int OnMoreData(AudioBus* dest,
+ AudioBuffersState /* state */) OVERRIDE {
+ size_t bytes_written = 0;
+ if (wav_audio_.AtEnd(cursor_) ||
+ !wav_audio_.CopyTo(dest, cursor_, &bytes_written)) {
+ AudioManager::Get()->GetMessageLoop()->PostTask(
+ FROM_HERE,
+ base::Bind(&AudioStreamContainer::Stop, base::Unretained(this)));
+ return 0;
+ }
+ cursor_ += bytes_written;
+
+ return dest->frames();
+ }
+
+ virtual int OnMoreIOData(AudioBus* /* source */,
+ AudioBus* dest,
+ AudioBuffersState state) OVERRIDE {
+ return OnMoreData(dest, state);
+ }
+
+ virtual void OnError(AudioOutputStream* /* stream */) OVERRIDE {
+ LOG(ERROR) << "Error during system sound reproduction.";
+ }
+
+ AudioOutputStream* stream_;
+
+ const WavAudioHandler wav_audio_;
+ const AudioParameters params_;
+
+ size_t cursor_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioStreamContainer);
+};
+
+AudioStreamHandler::AudioStreamHandler(const base::StringPiece& wav_data)
+ : wav_audio_(wav_data),
+ initialized_(false) {
+ AudioManager* manager = AudioManager::Get();
+ if (!manager) {
+ LOG(ERROR) << "Can't get access to audio manager.";
+ return;
+ }
+ AudioParameters params(AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ GuessChannelLayout(wav_audio_.num_channels()),
+ wav_audio_.sample_rate(),
+ wav_audio_.bits_per_sample(),
+ kDefaultFrameCount);
+ if (!params.IsValid()) {
+ LOG(ERROR) << "Audio params are invalid.";
+ return;
+ }
+ stream_.reset(new AudioStreamContainer(wav_audio_, params));
+ initialized_ = true;
+}
+
+AudioStreamHandler::~AudioStreamHandler() {
+ DCHECK(CalledOnValidThread());
+ AudioManager::Get()->GetMessageLoop()->PostTask(
+ FROM_HERE,
+ base::Bind(&AudioStreamContainer::Stop, base::Unretained(stream_.get())));
+ AudioManager::Get()->GetMessageLoop()->DeleteSoon(FROM_HERE,
+ stream_.release());
+}
+
+bool AudioStreamHandler::IsInitialized() const {
+ DCHECK(CalledOnValidThread());
+ return initialized_;
+}
+
+bool AudioStreamHandler::Play() {
+ DCHECK(CalledOnValidThread());
+
+ if (!IsInitialized())
+ return false;
+
+ AudioManager::Get()->GetMessageLoop()->PostTask(
+ FROM_HERE,
+ base::Bind(base::IgnoreResult(&AudioStreamContainer::Play),
+ base::Unretained(stream_.get())));
+ return true;
+}
+
+void AudioStreamHandler::Stop() {
+ DCHECK(CalledOnValidThread());
+ AudioManager::Get()->GetMessageLoop()->PostTask(
+ FROM_HERE,
+ base::Bind(&AudioStreamContainer::Stop, base::Unretained(stream_.get())));
+}
+
+// static
+void AudioStreamHandler::SetObserverForTesting(TestObserver* observer) {
+ g_observer_for_testing = observer;
+}
+
+// static
+void AudioStreamHandler::SetAudioSourceForTesting(
+ AudioOutputStream::AudioSourceCallback* source) {
+ g_audio_source_for_testing = source;
+}
+
+} // namespace media
diff --git a/chromium/media/audio/sounds/audio_stream_handler.h b/chromium/media/audio/sounds/audio_stream_handler.h
new file mode 100644
index 00000000000..7c63a24f034
--- /dev/null
+++ b/chromium/media/audio/sounds/audio_stream_handler.h
@@ -0,0 +1,76 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_SOUNDS_AUDIO_STREAM_HANDLER_H_
+#define MEDIA_AUDIO_SOUNDS_AUDIO_STREAM_HANDLER_H_
+
+#include "base/basictypes.h"
+#include "base/compiler_specific.h"
+#include "base/strings/string_piece.h"
+#include "base/threading/non_thread_safe.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/audio_parameters.h"
+#include "media/audio/sounds/wav_audio_handler.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+class AudioManager;
+
+// This class sends a sound to the audio manager.
+class MEDIA_EXPORT AudioStreamHandler : public base::NonThreadSafe {
+ public:
+ class TestObserver {
+ public:
+ virtual ~TestObserver() {}
+
+ // Following methods will be called only from the audio thread.
+
+ // Called when AudioOutputStreamProxy::Start() was successfully called.
+ virtual void OnPlay() = 0;
+
+ // Called when AudioOutputStreamProxy::Stop() was successfully called.
+ virtual void OnStop(size_t cursor) = 0;
+ };
+
+  // C-tor for AudioStreamHandler. |wav_data| should be raw
+  // uncompressed WAVE data which will be sent to the audio manager.
+ explicit AudioStreamHandler(const base::StringPiece& wav_data);
+ virtual ~AudioStreamHandler();
+
+  // Returns true iff AudioStreamHandler is correctly initialized.
+ bool IsInitialized() const;
+
+ // Stops any previous playback if it's still not completed and
+ // starts new playback. Volume level will be set according to
+ // current settings and won't be changed during playback. Returns
+ // true iff new playback was successfully started.
+ bool Play();
+
+ // Stops current playback.
+ void Stop();
+
+ const WavAudioHandler& wav_audio_handler() const { return wav_audio_; }
+
+ private:
+ friend class AudioStreamHandlerTest;
+ friend class SoundsManagerTest;
+
+ class AudioStreamContainer;
+
+ static void SetObserverForTesting(TestObserver* observer);
+ static void SetAudioSourceForTesting(
+ AudioOutputStream::AudioSourceCallback* source);
+
+ WavAudioHandler wav_audio_;
+ scoped_ptr<AudioStreamContainer> stream_;
+
+ bool initialized_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioStreamHandler);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_SOUNDS_AUDIO_STREAM_HANDLER_H_
diff --git a/chromium/media/audio/sounds/audio_stream_handler_unittest.cc b/chromium/media/audio/sounds/audio_stream_handler_unittest.cc
new file mode 100644
index 00000000000..50bc301c38a
--- /dev/null
+++ b/chromium/media/audio/sounds/audio_stream_handler_unittest.cc
@@ -0,0 +1,108 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/basictypes.h"
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/compiler_specific.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/audio_manager.h"
+#include "media/audio/simple_sources.h"
+#include "media/audio/sounds/audio_stream_handler.h"
+#include "media/audio/sounds/test_data.h"
+#include "media/base/channel_layout.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+class AudioStreamHandlerTest : public testing::Test {
+ public:
+ AudioStreamHandlerTest() {}
+ virtual ~AudioStreamHandlerTest() {}
+
+ virtual void SetUp() OVERRIDE {
+ audio_manager_.reset(AudioManager::CreateForTesting());
+
+ base::StringPiece data(kTestAudioData, arraysize(kTestAudioData));
+ audio_stream_handler_.reset(new AudioStreamHandler(data));
+ }
+
+ virtual void TearDown() OVERRIDE {
+ audio_stream_handler_.reset();
+ audio_manager_.reset();
+ }
+
+ AudioStreamHandler* audio_stream_handler() {
+ return audio_stream_handler_.get();
+ }
+
+ void SetObserverForTesting(AudioStreamHandler::TestObserver* observer) {
+ AudioStreamHandler::SetObserverForTesting(observer);
+ }
+
+ void SetAudioSourceForTesting(
+ AudioOutputStream::AudioSourceCallback* source) {
+ AudioStreamHandler::SetAudioSourceForTesting(source);
+ }
+
+ private:
+ scoped_ptr<AudioManager> audio_manager_;
+ scoped_ptr<AudioStreamHandler> audio_stream_handler_;
+
+ base::MessageLoop message_loop_;
+};
+
+TEST_F(AudioStreamHandlerTest, Play) {
+ base::RunLoop run_loop;
+ TestObserver observer(run_loop.QuitClosure());
+
+ SetObserverForTesting(&observer);
+
+ ASSERT_TRUE(audio_stream_handler()->IsInitialized());
+ ASSERT_TRUE(audio_stream_handler()->Play());
+
+ run_loop.Run();
+
+ SetObserverForTesting(NULL);
+
+ ASSERT_EQ(1, observer.num_play_requests());
+ ASSERT_EQ(1, observer.num_stop_requests());
+ ASSERT_EQ(4, observer.cursor());
+}
+
+TEST_F(AudioStreamHandlerTest, Rewind) {
+ base::RunLoop run_loop;
+ TestObserver observer(run_loop.QuitClosure());
+ SineWaveAudioSource source(CHANNEL_LAYOUT_STEREO, 200.0, 8000);
+
+ SetObserverForTesting(&observer);
+ SetAudioSourceForTesting(&source);
+
+ ASSERT_TRUE(audio_stream_handler()->IsInitialized());
+
+ ASSERT_TRUE(audio_stream_handler()->Play());
+ base::MessageLoop::current()->PostDelayedTask(
+ FROM_HERE,
+ base::Bind(base::IgnoreResult(&AudioStreamHandler::Play),
+ base::Unretained(audio_stream_handler())),
+ base::TimeDelta::FromSeconds(3));
+ base::MessageLoop::current()->PostDelayedTask(
+ FROM_HERE,
+ base::Bind(&AudioStreamHandler::Stop,
+ base::Unretained(audio_stream_handler())),
+ base::TimeDelta::FromSeconds(6));
+
+ run_loop.Run();
+
+ SetObserverForTesting(NULL);
+ SetAudioSourceForTesting(NULL);
+
+ ASSERT_EQ(2, observer.num_play_requests());
+ ASSERT_EQ(1, observer.num_stop_requests());
+}
+
+} // namespace media
diff --git a/chromium/media/audio/sounds/sounds_manager.cc b/chromium/media/audio/sounds/sounds_manager.cc
new file mode 100644
index 00000000000..e93dc6588dd
--- /dev/null
+++ b/chromium/media/audio/sounds/sounds_manager.cc
@@ -0,0 +1,150 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/sounds/sounds_manager.h"
+
+#include "base/command_line.h"
+#include "base/compiler_specific.h"
+#include "base/logging.h"
+#include "base/memory/linked_ptr.h"
+#include "base/memory/ref_counted.h"
+#include "base/message_loop/message_loop_proxy.h"
+#include "media/audio/audio_manager.h"
+#include "media/audio/sounds/audio_stream_handler.h"
+#include "media/base/media_switches.h"
+
+namespace media {
+
+namespace {
+
+SoundsManager* g_instance = NULL;
+
+// SoundsManagerImpl ---------------------------------------------------
+
+class SoundsManagerImpl : public SoundsManager {
+ public:
+ SoundsManagerImpl();
+ virtual ~SoundsManagerImpl();
+
+ // SoundsManager implementation:
+ virtual bool Initialize(SoundKey key,
+ const base::StringPiece& data) OVERRIDE;
+ virtual bool Play(SoundKey key) OVERRIDE;
+ virtual base::TimeDelta GetDuration(SoundKey key) OVERRIDE;
+
+ private:
+ base::hash_map<SoundKey, linked_ptr<AudioStreamHandler> > handlers_;
+ scoped_refptr<base::MessageLoopProxy> message_loop_;
+
+ DISALLOW_COPY_AND_ASSIGN(SoundsManagerImpl);
+};
+
+SoundsManagerImpl::SoundsManagerImpl()
+ : message_loop_(AudioManager::Get()->GetMessageLoop()) {}
+
+SoundsManagerImpl::~SoundsManagerImpl() { DCHECK(CalledOnValidThread()); }
+
+bool SoundsManagerImpl::Initialize(SoundKey key,
+ const base::StringPiece& data) {
+ if (handlers_.find(key) != handlers_.end() && handlers_[key]->IsInitialized())
+ return true;
+ linked_ptr<AudioStreamHandler> handler(new AudioStreamHandler(data));
+ if (!handler->IsInitialized()) {
+ LOG(WARNING) << "Can't initialize AudioStreamHandler for key=" << key;
+ return false;
+ }
+ handlers_[key] = handler;
+ return true;
+}
+
+bool SoundsManagerImpl::Play(SoundKey key) {
+ DCHECK(CalledOnValidThread());
+ if (handlers_.find(key) == handlers_.end() ||
+ !handlers_[key]->IsInitialized()) {
+ return false;
+ }
+ return handlers_[key]->Play();
+}
+
+base::TimeDelta SoundsManagerImpl::GetDuration(SoundKey key) {
+ DCHECK(CalledOnValidThread());
+ if (handlers_.find(key) == handlers_.end() ||
+ !handlers_[key]->IsInitialized()) {
+ return base::TimeDelta();
+ }
+ const WavAudioHandler& wav_audio = handlers_[key]->wav_audio_handler();
+ const int64 size = wav_audio.size();
+ const int64 rate = wav_audio.byte_rate();
+ return base::TimeDelta::FromMicroseconds(size * 1000000 / rate);
+}
+
+// SoundsManagerStub ---------------------------------------------------
+
+class SoundsManagerStub : public SoundsManager {
+ public:
+ SoundsManagerStub();
+ virtual ~SoundsManagerStub();
+
+ // SoundsManager implementation:
+ virtual bool Initialize(SoundKey key,
+ const base::StringPiece& data) OVERRIDE;
+ virtual bool Play(SoundKey key) OVERRIDE;
+ virtual base::TimeDelta GetDuration(SoundKey key) OVERRIDE;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(SoundsManagerStub);
+};
+
+SoundsManagerStub::SoundsManagerStub() {}
+
+SoundsManagerStub::~SoundsManagerStub() { DCHECK(CalledOnValidThread()); }
+
+bool SoundsManagerStub::Initialize(SoundKey /* key */,
+ const base::StringPiece& /* data */) {
+ DCHECK(CalledOnValidThread());
+ return false;
+}
+
+bool SoundsManagerStub::Play(SoundKey /* key */) {
+ DCHECK(CalledOnValidThread());
+ return false;
+}
+
+base::TimeDelta SoundsManagerStub::GetDuration(SoundKey /* key */) {
+ DCHECK(CalledOnValidThread());
+ return base::TimeDelta();
+}
+
+} // namespace
+
+SoundsManager::SoundsManager() {}
+
+SoundsManager::~SoundsManager() { DCHECK(CalledOnValidThread()); }
+
+// static
+void SoundsManager::Create() {
+ CHECK(!g_instance) << "SoundsManager::Create() is called twice";
+ const bool enabled = !CommandLine::ForCurrentProcess()->HasSwitch(
+ ::switches::kDisableSystemSoundsManager);
+ if (enabled)
+ g_instance = new SoundsManagerImpl();
+ else
+ g_instance = new SoundsManagerStub();
+}
+
+// static
+void SoundsManager::Shutdown() {
+ CHECK(g_instance) << "SoundsManager::Shutdown() is called "
+ << "without previous call to Create()";
+ delete g_instance;
+ g_instance = NULL;
+}
+
+// static
+SoundsManager* SoundsManager::Get() {
+ CHECK(g_instance) << "SoundsManager::Get() is called before Create()";
+ return g_instance;
+}
+
+} // namespace media
diff --git a/chromium/media/audio/sounds/sounds_manager.h b/chromium/media/audio/sounds/sounds_manager.h
new file mode 100644
index 00000000000..7ff6aafffdc
--- /dev/null
+++ b/chromium/media/audio/sounds/sounds_manager.h
@@ -0,0 +1,56 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_SOUNDS_SOUNDS_MANAGER_H_
+#define MEDIA_AUDIO_SOUNDS_SOUNDS_MANAGER_H_
+
+#include "base/basictypes.h"
+#include "base/containers/hash_tables.h"
+#include "base/strings/string_piece.h"
+#include "base/threading/non_thread_safe.h"
+#include "base/time/time.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+// This class is used for reproduction of system sounds. All methods
+// should be accessed from the Audio thread.
+class MEDIA_EXPORT SoundsManager : public base::NonThreadSafe {
+ public:
+ typedef int SoundKey;
+
+ // Creates a singleton instance of the SoundsManager.
+ static void Create();
+
+ // Removes a singleton instance of the SoundsManager.
+ static void Shutdown();
+
+ // Returns a pointer to a singleton instance of the SoundsManager.
+ static SoundsManager* Get();
+
+ // Initializes SoundsManager with the wav data for the system
+ // sounds. Returns true if SoundsManager was successfully
+ // initialized.
+ virtual bool Initialize(SoundKey key, const base::StringPiece& data) = 0;
+
+ // Plays sound identified by |key|, returns false if SoundsManager
+ // was not properly initialized.
+ virtual bool Play(SoundKey key) = 0;
+
+ // Returns duration of the sound identified by |key|. If SoundsManager
+ // was not properly initialized or |key| was not registered, this
+ // method returns an empty value.
+ virtual base::TimeDelta GetDuration(SoundKey key) = 0;
+
+ protected:
+ SoundsManager();
+ virtual ~SoundsManager();
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(SoundsManager);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_SOUNDS_SOUNDS_MANAGER_H_
diff --git a/chromium/media/audio/sounds/sounds_manager_unittest.cc b/chromium/media/audio/sounds/sounds_manager_unittest.cc
new file mode 100644
index 00000000000..5aa3694e838
--- /dev/null
+++ b/chromium/media/audio/sounds/sounds_manager_unittest.cc
@@ -0,0 +1,69 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <vector>
+
+#include "base/compiler_specific.h"
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "base/strings/string_piece.h"
+#include "media/audio/audio_manager.h"
+#include "media/audio/sounds/audio_stream_handler.h"
+#include "media/audio/sounds/sounds_manager.h"
+#include "media/audio/sounds/test_data.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+class SoundsManagerTest : public testing::Test {
+ public:
+ SoundsManagerTest() {}
+ virtual ~SoundsManagerTest() {}
+
+ virtual void SetUp() OVERRIDE {
+ audio_manager_.reset(AudioManager::CreateForTesting());
+ SoundsManager::Create();
+ }
+
+ virtual void TearDown() OVERRIDE {
+ SoundsManager::Shutdown();
+ audio_manager_.reset();
+ }
+
+ void SetObserverForTesting(AudioStreamHandler::TestObserver* observer) {
+ AudioStreamHandler::SetObserverForTesting(observer);
+ }
+
+ private:
+ scoped_ptr<AudioManager> audio_manager_;
+
+ base::MessageLoop message_loop_;
+};
+
+TEST_F(SoundsManagerTest, Play) {
+ ASSERT_TRUE(SoundsManager::Get());
+
+ base::RunLoop run_loop;
+ TestObserver observer(run_loop.QuitClosure());
+
+ SetObserverForTesting(&observer);
+
+ ASSERT_TRUE(SoundsManager::Get()->Initialize(
+ kTestAudioKey,
+ base::StringPiece(kTestAudioData, arraysize(kTestAudioData))));
+ ASSERT_EQ(41,
+ SoundsManager::Get()->GetDuration(kTestAudioKey).InMicroseconds());
+ ASSERT_TRUE(SoundsManager::Get()->Play(kTestAudioKey));
+ run_loop.Run();
+
+ ASSERT_EQ(1, observer.num_play_requests());
+ ASSERT_EQ(1, observer.num_stop_requests());
+ ASSERT_EQ(4, observer.cursor());
+
+ SetObserverForTesting(NULL);
+}
+
+} // namespace media
diff --git a/chromium/media/audio/sounds/test_data.cc b/chromium/media/audio/sounds/test_data.cc
new file mode 100644
index 00000000000..dc667c9996d
--- /dev/null
+++ b/chromium/media/audio/sounds/test_data.cc
@@ -0,0 +1,34 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/sounds/test_data.h"
+
+#include "base/logging.h"
+#include "base/message_loop/message_loop.h"
+
+namespace media {
+
+TestObserver::TestObserver(const base::Closure& quit)
+ : loop_(base::MessageLoop::current()),
+ quit_(quit),
+ num_play_requests_(0),
+ num_stop_requests_(0),
+ cursor_(0) {
+ DCHECK(loop_);
+}
+
+TestObserver::~TestObserver() {
+}
+
+void TestObserver::OnPlay() {
+ ++num_play_requests_;
+}
+
+void TestObserver::OnStop(size_t cursor) {
+ ++num_stop_requests_;
+ cursor_ = cursor;
+ loop_->PostTask(FROM_HERE, quit_);
+}
+
+} // namespace media
diff --git a/chromium/media/audio/sounds/test_data.h b/chromium/media/audio/sounds/test_data.h
new file mode 100644
index 00000000000..d7fb11ddb6b
--- /dev/null
+++ b/chromium/media/audio/sounds/test_data.h
@@ -0,0 +1,51 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_SOUNDS_TEST_UTILS_H_
+#define MEDIA_AUDIO_SOUNDS_TEST_UTILS_H_
+
+#include "base/basictypes.h"
+#include "base/callback.h"
+#include "base/compiler_specific.h"
+#include "media/audio/sounds/audio_stream_handler.h"
+
+namespace base {
+class MessageLoop;
+}
+
+namespace media {
+
+const int kTestAudioKey = 1000;
+
+const char kTestAudioData[] = "RIFF\x26\x00\x00\x00WAVEfmt \x10\x00\x00\x00"
+ "\x01\x00\x02\x00\x80\xbb\x00\x00\x00\x77\x01\x00\x02\x00\x10\x00"
+ "data\x04\x00\x00\x00\x01\x00\x01\x00";
+
+class TestObserver : public AudioStreamHandler::TestObserver {
+ public:
+ TestObserver(const base::Closure& quit);
+ virtual ~TestObserver();
+
+ // AudioStreamHandler::TestObserver implementation:
+ virtual void OnPlay() OVERRIDE;
+ virtual void OnStop(size_t cursor) OVERRIDE;
+
+ int num_play_requests() const { return num_play_requests_; }
+ int num_stop_requests() const { return num_stop_requests_; }
+ int cursor() const { return cursor_; }
+
+ private:
+ base::MessageLoop* loop_;
+ base::Closure quit_;
+
+ int num_play_requests_;
+ int num_stop_requests_;
+ int cursor_;
+
+ DISALLOW_COPY_AND_ASSIGN(TestObserver);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_SOUNDS_TEST_UTILS_H_
diff --git a/chromium/media/audio/sounds/wav_audio_handler.cc b/chromium/media/audio/sounds/wav_audio_handler.cc
new file mode 100644
index 00000000000..20eab8be437
--- /dev/null
+++ b/chromium/media/audio/sounds/wav_audio_handler.cc
@@ -0,0 +1,141 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/sounds/wav_audio_handler.h"
+
+#include <algorithm>
+#include <cstring>
+
+#include "base/logging.h"
+#include "base/sys_byteorder.h"
+#include "media/base/audio_bus.h"
+
+namespace {
+
+const char kChunkId[] = "RIFF";
+const char kFormat[] = "WAVE";
+const char kSubchunk1Id[] = "fmt ";
+const char kSubchunk2Id[] = "data";
+
+// The size of the header of a wav file. The header consists of 'RIFF', 4 bytes
+// of total data length, and 'WAVE'.
+const size_t kWavFileHeaderSize = 12;
+
+// The size of a chunk header in wav file format. A chunk header consists of a
+// tag ('fmt ' or 'data') and 4 bytes of chunk length.
+const size_t kChunkHeaderSize = 8;
+
+// The minimum size of 'fmt' chunk.
+const size_t kFmtChunkMinimumSize = 16;
+
+// The offsets of 'fmt' fields.
+const size_t kAudioFormatOffset = 0;
+const size_t kChannelOffset = 2;
+const size_t kSampleRateOffset = 4;
+const size_t kByteRateOffset = 8;
+const size_t kBitsPerSampleOffset = 14;
+
+// Some constants for audio format.
+const int kAudioFormatPCM = 1;
+
+// Reads an integer from |data| with |offset|.
+template<typename T> T ReadInt(const base::StringPiece& data, size_t offset) {
+ CHECK_LE(offset + sizeof(T), data.size());
+ T result;
+ memcpy(&result, data.data() + offset, sizeof(T));
+#if !defined(ARCH_CPU_LITTLE_ENDIAN)
+ result = base::ByteSwap(result);
+#endif
+ return result;
+}
+
+} // namespace
+
+namespace media {
+
+WavAudioHandler::WavAudioHandler(const base::StringPiece& wav_data)
+ : num_channels_(0),
+ sample_rate_(0),
+ byte_rate_(0),
+ bits_per_sample_(0) {
+ CHECK_LE(kWavFileHeaderSize, wav_data.size()) << "wav data is too small";
+ CHECK(wav_data.starts_with(kChunkId) &&
+ memcmp(wav_data.data() + 8, kFormat, 4) == 0)
+ << "incorrect wav header";
+
+ uint32 total_length = std::min(ReadInt<uint32>(wav_data, 4),
+ static_cast<uint32>(wav_data.size()));
+ uint32 offset = kWavFileHeaderSize;
+ while (offset < total_length) {
+ const int length = ParseSubChunk(wav_data.substr(offset));
+ CHECK_LE(0, length) << "can't parse wav sub-chunk";
+ offset += length;
+ }
+}
+
+WavAudioHandler::~WavAudioHandler() {
+}
+
+bool WavAudioHandler::AtEnd(size_t cursor) const {
+ return data_.size() <= cursor;
+}
+
+bool WavAudioHandler::CopyTo(AudioBus* bus,
+ size_t cursor,
+ size_t* bytes_written) const {
+ if (!bus)
+ return false;
+ if (bus->channels() != num_channels_) {
+ LOG(ERROR) << "Number of channel mismatch.";
+ return false;
+ }
+ if (AtEnd(cursor)) {
+ bus->Zero();
+ return true;
+ }
+ const int remaining_frames = (data_.size() - cursor) / bytes_per_frame_;
+ const int frames = std::min(bus->frames(), remaining_frames);
+ bus->FromInterleaved(data_.data() + cursor, frames, bytes_per_sample_);
+ *bytes_written = frames * bytes_per_frame_;
+ bus->ZeroFramesPartial(frames, bus->frames() - frames);
+ return true;
+}
+
+int WavAudioHandler::ParseSubChunk(const base::StringPiece& data) {
+ if (data.size() < kChunkHeaderSize)
+ return data.size();
+ uint32 chunk_length = ReadInt<uint32>(data, 4);
+ if (data.starts_with(kSubchunk1Id)) {
+ if (!ParseFmtChunk(data.substr(kChunkHeaderSize, chunk_length)))
+ return -1;
+ } else if (data.starts_with(kSubchunk2Id)) {
+ if (!ParseDataChunk(data.substr(kChunkHeaderSize, chunk_length)))
+ return -1;
+ } else {
+ LOG(ERROR) << "Unknown data chunk: " << data.substr(0, 4) << ".";
+ }
+ return chunk_length + kChunkHeaderSize;
+}
+
+bool WavAudioHandler::ParseFmtChunk(const base::StringPiece& data) {
+ if (data.size() < kFmtChunkMinimumSize) {
+ LOG(ERROR) << "Data size " << data.size() << " is too short.";
+ return false;
+ }
+ DCHECK_EQ(ReadInt<uint16>(data, kAudioFormatOffset), kAudioFormatPCM);
+ num_channels_ = ReadInt<uint16>(data, kChannelOffset);
+ sample_rate_ = ReadInt<uint32>(data, kSampleRateOffset);
+ byte_rate_ = ReadInt<uint32>(data, kByteRateOffset);
+ bits_per_sample_ = ReadInt<uint16>(data, kBitsPerSampleOffset);
+ bytes_per_sample_ = bits_per_sample_ >> 3;
+ bytes_per_frame_ = num_channels_ * bytes_per_sample_;
+ return true;
+}
+
+bool WavAudioHandler::ParseDataChunk(const base::StringPiece& data) {
+ data_ = data;
+ return true;
+}
+
+} // namespace media
diff --git a/chromium/media/audio/sounds/wav_audio_handler.h b/chromium/media/audio/sounds/wav_audio_handler.h
new file mode 100644
index 00000000000..a2c3e023650
--- /dev/null
+++ b/chromium/media/audio/sounds/wav_audio_handler.h
@@ -0,0 +1,59 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_SOUNDS_WAV_AUDIO_HANDLER_H_
+#define MEDIA_AUDIO_SOUNDS_WAV_AUDIO_HANDLER_H_
+
+#include "base/strings/string_piece.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+class AudioBus;
+
+// This class parses audio data in the wav file format. See
+// https://ccrma.stanford.edu/courses/422/projects/WaveFormat/
+class MEDIA_EXPORT WavAudioHandler {
+ public:
+ explicit WavAudioHandler(const base::StringPiece& wav_data);
+ virtual ~WavAudioHandler();
+
+ // Returns true when cursor points to the end of the track.
+ bool AtEnd(size_t cursor) const;
+
+ // Copies the audio data to |bus| starting from the |cursor| and in
+ // the case of success stores the number of written bytes in
+ // |bytes_written|. |bytes_written| should not be NULL.
+ bool CopyTo(AudioBus* bus, size_t cursor, size_t* bytes_written) const;
+
+ int size() const { return data_.size(); }
+ uint16 num_channels() const { return num_channels_; }
+ uint32 sample_rate() const { return sample_rate_; }
+ uint32 byte_rate() const { return byte_rate_; }
+ uint16 bits_per_sample() const { return bits_per_sample_; }
+
+ private:
+  // Parses a wav sub-chunk. Returns its total length (header included), or -1 on error.
+ int ParseSubChunk(const base::StringPiece& data);
+
+  // Parses the 'fmt ' chunk and stores the audio format fields (channels, rates, bits).
+ bool ParseFmtChunk(const base::StringPiece& data);
+
+ // Parses the 'data' section chunk and stores |data_|.
+ bool ParseDataChunk(const base::StringPiece& data);
+
+ // Data part of the |wav_data_|.
+ base::StringPiece data_;
+
+ uint16 num_channels_;
+ uint32 sample_rate_;
+ uint32 byte_rate_;
+ uint16 bits_per_sample_;
+ int bytes_per_sample_;
+ int bytes_per_frame_;
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_SOUNDS_WAV_AUDIO_HANDLER_H_
diff --git a/chromium/media/audio/sounds/wav_audio_handler_unittest.cc b/chromium/media/audio/sounds/wav_audio_handler_unittest.cc
new file mode 100644
index 00000000000..a7f8728be35
--- /dev/null
+++ b/chromium/media/audio/sounds/wav_audio_handler_unittest.cc
@@ -0,0 +1,33 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/basictypes.h"
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/strings/string_piece.h"
+#include "media/audio/sounds/test_data.h"
+#include "media/audio/sounds/wav_audio_handler.h"
+#include "media/base/audio_bus.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+TEST(WavAudioHandlerTest, SampleDataTest) {
+ WavAudioHandler handler(base::StringPiece(kTestAudioData,
+ arraysize(kTestAudioData)));
+ ASSERT_EQ(static_cast<uint16>(2), handler.num_channels());
+ ASSERT_EQ(static_cast<uint16>(16), handler.bits_per_sample());
+ ASSERT_EQ(static_cast<uint32>(48000), handler.sample_rate());
+ ASSERT_EQ(static_cast<uint32>(96000), handler.byte_rate());
+
+ ASSERT_EQ(4, handler.size());
+ scoped_ptr<AudioBus> bus = AudioBus::Create(
+ handler.num_channels(),
+ handler.size() / handler.num_channels());
+ size_t bytes_written;
+ ASSERT_TRUE(handler.CopyTo(bus.get(), 0, &bytes_written));
+ ASSERT_EQ(static_cast<size_t>(handler.size()), bytes_written);
+}
+
+} // namespace media
diff --git a/chromium/media/audio/test_audio_input_controller_factory.cc b/chromium/media/audio/test_audio_input_controller_factory.cc
index d146231a25d..3aeb7773366 100644
--- a/chromium/media/audio/test_audio_input_controller_factory.cc
+++ b/chromium/media/audio/test_audio_input_controller_factory.cc
@@ -57,11 +57,6 @@ AudioInputController* TestAudioInputControllerFactory::Create(
return controller_;
}
-void TestAudioInputControllerFactory::SetDelegateForTests(
- TestAudioInputControllerDelegate* delegate) {
- delegate_ = delegate;
-}
-
void TestAudioInputControllerFactory::OnTestAudioInputControllerDestroyed(
TestAudioInputController* controller) {
DCHECK_EQ(controller_, controller);
diff --git a/chromium/media/audio/test_audio_input_controller_factory.h b/chromium/media/audio/test_audio_input_controller_factory.h
index 4968c013d97..d49302280f2 100644
--- a/chromium/media/audio/test_audio_input_controller_factory.h
+++ b/chromium/media/audio/test_audio_input_controller_factory.h
@@ -69,6 +69,10 @@ class TestAudioInputController : public AudioInputController {
// Ensure that the closure is run on the audio-manager thread.
virtual void Close(const base::Closure& closed_task) OVERRIDE;
+ const AudioParameters& audio_parameters() const {
+ return audio_parameters_;
+ }
+
protected:
virtual ~TestAudioInputController();
@@ -99,7 +103,9 @@ class TestAudioInputControllerFactory : public AudioInputController::Factory {
AudioParameters params,
UserInputMonitor* user_input_monitor) OVERRIDE;
- void SetDelegateForTests(TestAudioInputControllerDelegate* delegate);
+ void set_delegate(TestAudioInputControllerDelegate* delegate) {
+ delegate_ = delegate;
+ }
TestAudioInputController* controller() const { return controller_; }
diff --git a/chromium/media/audio/win/audio_device_listener_win.cc b/chromium/media/audio/win/audio_device_listener_win.cc
index 8734cf2b78f..adbc9a82e4d 100644
--- a/chromium/media/audio/win/audio_device_listener_win.cc
+++ b/chromium/media/audio/win/audio_device_listener_win.cc
@@ -11,7 +11,6 @@
#include "base/system_monitor/system_monitor.h"
#include "base/win/scoped_co_mem.h"
#include "base/win/windows_version.h"
-#include "media/audio/audio_util.h"
#include "media/audio/win/core_audio_util_win.h"
using base::win::ScopedCoMem;
diff --git a/chromium/media/audio/win/audio_low_latency_input_win.cc b/chromium/media/audio/win/audio_low_latency_input_win.cc
index a174ea2ea0d..b16ef130a9f 100644
--- a/chromium/media/audio/win/audio_low_latency_input_win.cc
+++ b/chromium/media/audio/win/audio_low_latency_input_win.cc
@@ -7,7 +7,6 @@
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/strings/utf_string_conversions.h"
-#include "media/audio/audio_util.h"
#include "media/audio/win/audio_manager_win.h"
#include "media/audio/win/avrt_wrapper_win.h"
diff --git a/chromium/media/audio/win/audio_low_latency_input_win_unittest.cc b/chromium/media/audio/win/audio_low_latency_input_win_unittest.cc
index 11fad25d3fe..54bd3f71b26 100644
--- a/chromium/media/audio/win/audio_low_latency_input_win_unittest.cc
+++ b/chromium/media/audio/win/audio_low_latency_input_win_unittest.cc
@@ -100,10 +100,9 @@ class WriteToFileAudioSink : public AudioInputStream::AudioInputCallback {
base::FilePath file_path;
EXPECT_TRUE(PathService::Get(base::DIR_EXE, &file_path));
file_path = file_path.AppendASCII(file_name);
- binary_file_ = file_util::OpenFile(file_path, "wb");
+ binary_file_ = base::OpenFile(file_path, "wb");
DLOG_IF(ERROR, !binary_file_) << "Failed to open binary PCM data file.";
- LOG(INFO) << ">> Output file: " << file_path.value()
- << " has been created.";
+ VLOG(0) << ">> Output file: " << file_path.value() << " has been created.";
}
virtual ~WriteToFileAudioSink() {
@@ -121,7 +120,7 @@ class WriteToFileAudioSink : public AudioInputStream::AudioInputCallback {
buffer_.Seek(chunk_size);
bytes_written += chunk_size;
}
- file_util::CloseFile(binary_file_);
+ base::CloseFile(binary_file_);
}
// AudioInputStream::AudioInputCallback implementation.
@@ -265,7 +264,7 @@ class ScopedAudioInputStream {
// Verify that we can retrieve the current hardware/mixing sample rate
// for all available input devices.
TEST(WinAudioInputTest, WASAPIAudioInputStreamHardwareSampleRate) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
if (!CanRunAudioTests(audio_manager.get()))
return;
@@ -288,7 +287,7 @@ TEST(WinAudioInputTest, WASAPIAudioInputStreamHardwareSampleRate) {
// Test Create(), Close() calling sequence.
TEST(WinAudioInputTest, WASAPIAudioInputStreamCreateAndClose) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
if (!CanRunAudioTests(audio_manager.get()))
return;
ScopedAudioInputStream ais(
@@ -298,7 +297,7 @@ TEST(WinAudioInputTest, WASAPIAudioInputStreamCreateAndClose) {
// Test Open(), Close() calling sequence.
TEST(WinAudioInputTest, WASAPIAudioInputStreamOpenAndClose) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
if (!CanRunAudioTests(audio_manager.get()))
return;
ScopedAudioInputStream ais(
@@ -309,7 +308,7 @@ TEST(WinAudioInputTest, WASAPIAudioInputStreamOpenAndClose) {
// Test Open(), Start(), Close() calling sequence.
TEST(WinAudioInputTest, WASAPIAudioInputStreamOpenStartAndClose) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
if (!CanRunAudioTests(audio_manager.get()))
return;
ScopedAudioInputStream ais(
@@ -324,7 +323,7 @@ TEST(WinAudioInputTest, WASAPIAudioInputStreamOpenStartAndClose) {
// Test Open(), Start(), Stop(), Close() calling sequence.
TEST(WinAudioInputTest, WASAPIAudioInputStreamOpenStartStopAndClose) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
if (!CanRunAudioTests(audio_manager.get()))
return;
ScopedAudioInputStream ais(
@@ -340,7 +339,7 @@ TEST(WinAudioInputTest, WASAPIAudioInputStreamOpenStartStopAndClose) {
// Test some additional calling sequences.
TEST(WinAudioInputTest, WASAPIAudioInputStreamMiscCallingSequences) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
if (!CanRunAudioTests(audio_manager.get()))
return;
ScopedAudioInputStream ais(
@@ -372,7 +371,7 @@ TEST(WinAudioInputTest, WASAPIAudioInputStreamMiscCallingSequences) {
}
TEST(WinAudioInputTest, WASAPIAudioInputStreamTestPacketSizes) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
if (!CanRunAudioTests(audio_manager.get()))
return;
@@ -454,7 +453,7 @@ TEST(WinAudioInputTest, WASAPIAudioInputStreamTestPacketSizes) {
// Test that we can capture loopback stream.
TEST(WinAudioInputTest, WASAPIAudioInputStreamLoopback) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
if (!audio_manager->HasAudioOutputDevices() || !CoreAudioUtil::IsSupported())
return;
@@ -488,7 +487,7 @@ TEST(WinAudioInputTest, WASAPIAudioInputStreamLoopback) {
// with --gtest_also_run_disabled_tests or set the GTEST_ALSO_RUN_DISABLED_TESTS
// environment variable to a value greater than 0.
TEST(WinAudioInputTest, DISABLED_WASAPIAudioInputStreamRecordToFile) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
if (!CanRunAudioTests(audio_manager.get()))
return;
@@ -501,13 +500,13 @@ TEST(WinAudioInputTest, DISABLED_WASAPIAudioInputStreamRecordToFile) {
ScopedAudioInputStream ais(aisw.Create());
EXPECT_TRUE(ais->Open());
- LOG(INFO) << ">> Sample rate: " << aisw.sample_rate() << " [Hz]";
+ VLOG(0) << ">> Sample rate: " << aisw.sample_rate() << " [Hz]";
WriteToFileAudioSink file_sink(file_name);
- LOG(INFO) << ">> Speak into the default microphone while recording.";
+ VLOG(0) << ">> Speak into the default microphone while recording.";
ais->Start(&file_sink);
base::PlatformThread::Sleep(TestTimeouts::action_timeout());
ais->Stop();
- LOG(INFO) << ">> Recording has stopped.";
+ VLOG(0) << ">> Recording has stopped.";
ais.Close();
}
diff --git a/chromium/media/audio/win/audio_low_latency_output_win.cc b/chromium/media/audio/win/audio_low_latency_output_win.cc
index c889c03ef2c..a10e67a46cb 100644
--- a/chromium/media/audio/win/audio_low_latency_output_win.cc
+++ b/chromium/media/audio/win/audio_low_latency_output_win.cc
@@ -25,23 +25,6 @@ using base::win::ScopedCoMem;
namespace media {
-typedef uint32 ChannelConfig;
-
-// Retrieves an integer mask which corresponds to the channel layout the
-// audio engine uses for its internal processing/mixing of shared-mode
-// streams. This mask indicates which channels are present in the multi-
-// channel stream. The least significant bit corresponds with the Front Left
-// speaker, the next least significant bit corresponds to the Front Right
-// speaker, and so on, continuing in the order defined in KsMedia.h.
-// See http://msdn.microsoft.com/en-us/library/windows/hardware/ff537083(v=vs.85).aspx
-// for more details.
-static ChannelConfig GetChannelConfig() {
- WAVEFORMATPCMEX format;
- return SUCCEEDED(CoreAudioUtil::GetDefaultSharedModeMixFormat(
- eRender, eConsole, &format)) ?
- static_cast<int>(format.dwChannelMask) : 0;
-}
-
// Compare two sets of audio parameters and return true if they are equal.
// Note that bits_per_sample() is excluded from this comparison since Core
// Audio can deal with most bit depths. As an example, if the native/mixing
@@ -55,40 +38,6 @@ static bool CompareAudioParametersNoBitDepthOrChannels(
a.frames_per_buffer() == b.frames_per_buffer());
}
-// Converts Microsoft's channel configuration to ChannelLayout.
-// This mapping is not perfect but the best we can do given the current
-// ChannelLayout enumerator and the Windows-specific speaker configurations
-// defined in ksmedia.h. Don't assume that the channel ordering in
-// ChannelLayout is exactly the same as the Windows specific configuration.
-// As an example: KSAUDIO_SPEAKER_7POINT1_SURROUND is mapped to
-// CHANNEL_LAYOUT_7_1 but the positions of Back L, Back R and Side L, Side R
-// speakers are different in these two definitions.
-static ChannelLayout ChannelConfigToChannelLayout(ChannelConfig config) {
- switch (config) {
- case KSAUDIO_SPEAKER_DIRECTOUT:
- return CHANNEL_LAYOUT_NONE;
- case KSAUDIO_SPEAKER_MONO:
- return CHANNEL_LAYOUT_MONO;
- case KSAUDIO_SPEAKER_STEREO:
- return CHANNEL_LAYOUT_STEREO;
- case KSAUDIO_SPEAKER_QUAD:
- return CHANNEL_LAYOUT_QUAD;
- case KSAUDIO_SPEAKER_SURROUND:
- return CHANNEL_LAYOUT_4_0;
- case KSAUDIO_SPEAKER_5POINT1:
- return CHANNEL_LAYOUT_5_1_BACK;
- case KSAUDIO_SPEAKER_5POINT1_SURROUND:
- return CHANNEL_LAYOUT_5_1;
- case KSAUDIO_SPEAKER_7POINT1:
- return CHANNEL_LAYOUT_7_1_WIDE;
- case KSAUDIO_SPEAKER_7POINT1_SURROUND:
- return CHANNEL_LAYOUT_7_1;
- default:
- VLOG(1) << "Unsupported channel layout: " << config;
- return CHANNEL_LAYOUT_UNSUPPORTED;
- }
-}
-
// static
AUDCLNT_SHAREMODE WASAPIAudioOutputStream::GetShareMode() {
const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
@@ -98,19 +47,6 @@ AUDCLNT_SHAREMODE WASAPIAudioOutputStream::GetShareMode() {
}
// static
-int WASAPIAudioOutputStream::HardwareChannelCount() {
- WAVEFORMATPCMEX format;
- return SUCCEEDED(CoreAudioUtil::GetDefaultSharedModeMixFormat(
- eRender, eConsole, &format)) ?
- static_cast<int>(format.Format.nChannels) : 0;
-}
-
-// static
-ChannelLayout WASAPIAudioOutputStream::HardwareChannelLayout() {
- return ChannelConfigToChannelLayout(GetChannelConfig());
-}
-
-// static
int WASAPIAudioOutputStream::HardwareSampleRate(const std::string& device_id) {
WAVEFORMATPCMEX format;
ScopedComPtr<IAudioClient> client;
@@ -135,9 +71,12 @@ WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager,
ERole device_role)
: creating_thread_id_(base::PlatformThread::CurrentId()),
manager_(manager),
+ format_(),
opened_(false),
audio_parameters_are_valid_(false),
volume_(1.0),
+ packet_size_frames_(0),
+ packet_size_bytes_(0),
endpoint_buffer_size_frames_(0),
device_id_(device_id),
device_role_(device_role),
@@ -187,18 +126,18 @@ WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager,
// Add the parts which are unique to WAVE_FORMAT_EXTENSIBLE.
format_.Samples.wValidBitsPerSample = params.bits_per_sample();
- format_.dwChannelMask = GetChannelConfig();
+ format_.dwChannelMask = CoreAudioUtil::GetChannelConfig(device_id, eRender);
format_.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
// Store size (in different units) of audio packets which we expect to
// get from the audio endpoint device in each render event.
packet_size_frames_ = params.frames_per_buffer();
packet_size_bytes_ = params.GetBytesPerBuffer();
- packet_size_ms_ = (1000.0 * packet_size_frames_) / params.sample_rate();
VLOG(1) << "Number of bytes per audio frame : " << format->nBlockAlign;
VLOG(1) << "Number of audio frames per packet: " << packet_size_frames_;
VLOG(1) << "Number of bytes per packet : " << packet_size_bytes_;
- VLOG(1) << "Number of milliseconds per packet: " << packet_size_ms_;
+ VLOG(1) << "Number of milliseconds per packet: "
+ << params.GetBufferDuration().InMillisecondsF();
// All events are auto-reset events and non-signaled initially.
@@ -298,6 +237,13 @@ bool WASAPIAudioOutputStream::Open() {
audio_client_ = audio_client;
audio_render_client_ = audio_render_client;
+ hr = audio_client_->GetService(__uuidof(IAudioClock),
+ audio_clock_.ReceiveVoid());
+ if (FAILED(hr)) {
+ LOG(ERROR) << "Failed to get IAudioClock service.";
+ return false;
+ }
+
opened_ = true;
return true;
}
@@ -315,6 +261,17 @@ void WASAPIAudioOutputStream::Start(AudioSourceCallback* callback) {
source_ = callback;
+ // Ensure that the endpoint buffer is prepared with silence.
+ if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
+ if (!CoreAudioUtil::FillRenderEndpointBufferWithSilence(
+ audio_client_, audio_render_client_)) {
+ LOG(ERROR) << "Failed to prepare endpoint buffers with silence.";
+ callback->OnError(this);
+ return;
+ }
+ }
+ num_written_frames_ = endpoint_buffer_size_frames_;
+
// Create and start the thread that will drive the rendering by waiting for
// render events.
render_thread_.reset(
@@ -322,26 +279,18 @@ void WASAPIAudioOutputStream::Start(AudioSourceCallback* callback) {
render_thread_->Start();
if (!render_thread_->HasBeenStarted()) {
LOG(ERROR) << "Failed to start WASAPI render thread.";
+ StopThread();
+ callback->OnError(this);
return;
}
- // Ensure that the endpoint buffer is prepared with silence.
- if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
- if (!CoreAudioUtil::FillRenderEndpointBufferWithSilence(
- audio_client_, audio_render_client_)) {
- LOG(WARNING) << "Failed to prepare endpoint buffers with silence.";
- return;
- }
- }
- num_written_frames_ = endpoint_buffer_size_frames_;
-
// Start streaming data between the endpoint buffer and the audio engine.
HRESULT hr = audio_client_->Start();
if (FAILED(hr)) {
- SetEvent(stop_render_event_.Get());
- render_thread_->Join();
- render_thread_.reset();
- HandleError(hr);
+ LOG_GETLASTERROR(ERROR)
+ << "Failed to start output streaming: " << std::hex << hr;
+ StopThread();
+ callback->OnError(this);
}
}
@@ -354,27 +303,21 @@ void WASAPIAudioOutputStream::Stop() {
// Stop output audio streaming.
HRESULT hr = audio_client_->Stop();
if (FAILED(hr)) {
- LOG_IF(ERROR, hr != AUDCLNT_E_NOT_INITIALIZED)
+ LOG_GETLASTERROR(ERROR)
<< "Failed to stop output streaming: " << std::hex << hr;
+ source_->OnError(this);
}
- // Wait until the thread completes and perform cleanup.
- SetEvent(stop_render_event_.Get());
- render_thread_->Join();
- render_thread_.reset();
-
- // Ensure that we don't quit the main thread loop immediately next
- // time Start() is called.
- ResetEvent(stop_render_event_.Get());
-
- // Clear source callback, it'll be set again on the next Start() call.
- source_ = NULL;
+ // Make a local copy of |source_| since StopThread() will clear it.
+ AudioSourceCallback* callback = source_;
+ StopThread();
// Flush all pending data and reset the audio clock stream position to 0.
hr = audio_client_->Reset();
if (FAILED(hr)) {
- LOG_IF(ERROR, hr != AUDCLNT_E_NOT_INITIALIZED)
+ LOG_GETLASTERROR(ERROR)
<< "Failed to reset streaming: " << std::hex << hr;
+ callback->OnError(this);
}
// Extra safety check to ensure that the buffers are cleared.
@@ -443,17 +386,9 @@ void WASAPIAudioOutputStream::Run() {
audio_samples_render_event_ };
UINT64 device_frequency = 0;
- // The IAudioClock interface enables us to monitor a stream's data
- // rate and the current position in the stream. Allocate it before we
- // start spinning.
- ScopedComPtr<IAudioClock> audio_clock;
- hr = audio_client_->GetService(__uuidof(IAudioClock),
- audio_clock.ReceiveVoid());
- if (SUCCEEDED(hr)) {
- // The device frequency is the frequency generated by the hardware clock in
- // the audio device. The GetFrequency() method reports a constant frequency.
- hr = audio_clock->GetFrequency(&device_frequency);
- }
+ // The device frequency is the frequency generated by the hardware clock in
+ // the audio device. The GetFrequency() method reports a constant frequency.
+ hr = audio_clock_->GetFrequency(&device_frequency);
error = FAILED(hr);
PLOG_IF(ERROR, error) << "Failed to acquire IAudioClock interface: "
<< std::hex << hr;
@@ -474,7 +409,7 @@ void WASAPIAudioOutputStream::Run() {
break;
case WAIT_OBJECT_0 + 1:
// |audio_samples_render_event_| has been set.
- RenderAudioFromSource(audio_clock, device_frequency);
+ error = !RenderAudioFromSource(device_frequency);
break;
default:
error = true;
@@ -496,8 +431,7 @@ void WASAPIAudioOutputStream::Run() {
}
}
-void WASAPIAudioOutputStream::RenderAudioFromSource(
- IAudioClock* audio_clock, UINT64 device_frequency) {
+bool WASAPIAudioOutputStream::RenderAudioFromSource(UINT64 device_frequency) {
TRACE_EVENT0("audio", "RenderAudioFromSource");
HRESULT hr = S_FALSE;
@@ -518,7 +452,7 @@ void WASAPIAudioOutputStream::RenderAudioFromSource(
if (FAILED(hr)) {
DLOG(ERROR) << "Failed to retrieve amount of available space: "
<< std::hex << hr;
- return;
+ return false;
}
} else {
// While the stream is running, the system alternately sends one
@@ -536,7 +470,7 @@ void WASAPIAudioOutputStream::RenderAudioFromSource(
// Check if there is enough available space to fit the packet size
// specified by the client.
if (num_available_frames < packet_size_frames_)
- return;
+ return true;
DLOG_IF(ERROR, num_available_frames % packet_size_frames_ != 0)
<< "Non-perfect timing detected (num_available_frames="
@@ -559,7 +493,7 @@ void WASAPIAudioOutputStream::RenderAudioFromSource(
if (FAILED(hr)) {
DLOG(ERROR) << "Failed to use rendering audio buffer: "
<< std::hex << hr;
- return;
+ return false;
}
// Derive the audio delay which corresponds to the delay between
@@ -569,7 +503,7 @@ void WASAPIAudioOutputStream::RenderAudioFromSource(
// unit at the render side.
UINT64 position = 0;
int audio_delay_bytes = 0;
- hr = audio_clock->GetPosition(&position, NULL);
+ hr = audio_clock_->GetPosition(&position, NULL);
if (SUCCEEDED(hr)) {
// Stream position of the sample that is currently playing
// through the speaker.
@@ -617,14 +551,8 @@ void WASAPIAudioOutputStream::RenderAudioFromSource(
num_written_frames_ += packet_size_frames_;
}
-}
-void WASAPIAudioOutputStream::HandleError(HRESULT err) {
- CHECK((started() && GetCurrentThreadId() == render_thread_->tid()) ||
- (!started() && GetCurrentThreadId() == creating_thread_id_));
- NOTREACHED() << "Error code: " << std::hex << err;
- if (source_)
- source_->OnError(this);
+ return true;
}
HRESULT WASAPIAudioOutputStream::ExclusiveModeInitialization(
@@ -706,4 +634,22 @@ HRESULT WASAPIAudioOutputStream::ExclusiveModeInitialization(
return hr;
}
+void WASAPIAudioOutputStream::StopThread() {
+ if (render_thread_ ) {
+ if (render_thread_->HasBeenStarted()) {
+ // Wait until the thread completes and perform cleanup.
+ SetEvent(stop_render_event_.Get());
+ render_thread_->Join();
+ }
+
+ render_thread_.reset();
+
+ // Ensure that we don't quit the main thread loop immediately next
+ // time Start() is called.
+ ResetEvent(stop_render_event_.Get());
+ }
+
+ source_ = NULL;
+}
+
} // namespace media
diff --git a/chromium/media/audio/win/audio_low_latency_output_win.h b/chromium/media/audio/win/audio_low_latency_output_win.h
index 7884d8840f7..2baf6f1ac9a 100644
--- a/chromium/media/audio/win/audio_low_latency_output_win.h
+++ b/chromium/media/audio/win/audio_low_latency_output_win.h
@@ -138,17 +138,6 @@ class MEDIA_EXPORT WASAPIAudioOutputStream :
virtual void SetVolume(double volume) OVERRIDE;
virtual void GetVolume(double* volume) OVERRIDE;
- // Retrieves the number of channels the audio engine uses for its internal
- // processing/mixing of shared-mode streams for the default endpoint device.
- static int HardwareChannelCount();
-
- // Retrieves the channel layout the audio engine uses for its internal
- // processing/mixing of shared-mode streams for the default endpoint device.
- // Note that we convert an internal channel layout mask (see ChannelMask())
- // into a Chrome-specific channel layout enumerator in this method, hence
- // the match might not be perfect.
- static ChannelLayout HardwareChannelLayout();
-
// Retrieves the sample rate the audio engine uses for its internal
// processing/mixing of shared-mode streams. To fetch the settings for the
// default device, pass an empty string as the |device_id|.
@@ -168,10 +157,7 @@ class MEDIA_EXPORT WASAPIAudioOutputStream :
// Checks available amount of space in the endpoint buffer and reads
// data from the client to fill up the buffer without causing audio
// glitches.
- void RenderAudioFromSource(IAudioClock* audio_clock, UINT64 device_frequency);
-
- // Issues the OnError() callback to the |sink_|.
- void HandleError(HRESULT err);
+ bool RenderAudioFromSource(UINT64 device_frequency);
// Called when the device will be opened in exclusive mode and use the
// application specified format.
@@ -181,6 +167,11 @@ class MEDIA_EXPORT WASAPIAudioOutputStream :
HANDLE event_handle,
uint32* endpoint_buffer_size);
+ // If |render_thread_| is valid, sets |stop_render_event_| and blocks until
+ // the thread has stopped. |stop_render_event_| is reset after the call.
+ // |source_| is set to NULL.
+ void StopThread();
+
// Contains the thread ID of the creating thread.
base::PlatformThreadId creating_thread_id_;
@@ -215,9 +206,6 @@ class MEDIA_EXPORT WASAPIAudioOutputStream :
// Size in bytes of each audio packet.
size_t packet_size_bytes_;
- // Size in milliseconds of each audio packet.
- float packet_size_ms_;
-
// Length of the audio endpoint buffer.
uint32 endpoint_buffer_size_frames_;
@@ -238,9 +226,6 @@ class MEDIA_EXPORT WASAPIAudioOutputStream :
// Pointer to the client that will deliver audio samples to be played out.
AudioSourceCallback* source_;
- // An IMMDeviceEnumerator interface which represents a device enumerator.
- base::win::ScopedComPtr<IMMDeviceEnumerator> device_enumerator_;
-
// An IAudioClient interface which enables a client to create and initialize
// an audio stream between an audio application and the audio engine.
base::win::ScopedComPtr<IAudioClient> audio_client_;
@@ -259,6 +244,8 @@ class MEDIA_EXPORT WASAPIAudioOutputStream :
// Container for retrieving data from AudioSourceCallback::OnMoreData().
scoped_ptr<AudioBus> audio_bus_;
+ base::win::ScopedComPtr<IAudioClock> audio_clock_;
+
DISALLOW_COPY_AND_ASSIGN(WASAPIAudioOutputStream);
};
diff --git a/chromium/media/audio/win/audio_low_latency_output_win_unittest.cc b/chromium/media/audio/win/audio_low_latency_output_win_unittest.cc
index 1f78facf91d..5fda4b14509 100644
--- a/chromium/media/audio/win/audio_low_latency_output_win_unittest.cc
+++ b/chromium/media/audio/win/audio_low_latency_output_win_unittest.cc
@@ -16,7 +16,6 @@
#include "base/win/scoped_com_initializer.h"
#include "media/audio/audio_io.h"
#include "media/audio/audio_manager.h"
-#include "media/audio/audio_util.h"
#include "media/audio/win/audio_low_latency_output_win.h"
#include "media/audio/win/core_audio_util_win.h"
#include "media/base/decoder_buffer.h"
@@ -98,7 +97,7 @@ class ReadFromFileAudioSource : public AudioOutputStream::AudioSourceCallback {
file_name = file_name.AppendASCII(kDeltaTimeMsFileName);
EXPECT_TRUE(!text_file_);
- text_file_ = file_util::OpenFile(file_name, "wt");
+ text_file_ = base::OpenFile(file_name, "wt");
DLOG_IF(ERROR, !text_file_) << "Failed to open log file.";
// Write the array which contains delta times to a text file.
@@ -108,7 +107,7 @@ class ReadFromFileAudioSource : public AudioOutputStream::AudioSourceCallback {
++elements_written;
}
- file_util::CloseFile(text_file_);
+ base::CloseFile(text_file_);
}
// AudioOutputStream::AudioSourceCallback implementation.
@@ -261,7 +260,7 @@ static AudioOutputStream* CreateDefaultAudioOutputStream(
TEST(WASAPIAudioOutputStreamTest, HardwareSampleRate) {
// Skip this test in exclusive mode since the resulting rate is only utilized
// for shared mode streams.
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
if (!CanRunAudioTests(audio_manager.get()) || ExclusiveModeIsEnabled())
return;
@@ -274,7 +273,7 @@ TEST(WASAPIAudioOutputStreamTest, HardwareSampleRate) {
// Test Create(), Close() calling sequence.
TEST(WASAPIAudioOutputStreamTest, CreateAndClose) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
if (!CanRunAudioTests(audio_manager.get()))
return;
AudioOutputStream* aos = CreateDefaultAudioOutputStream(audio_manager.get());
@@ -283,7 +282,7 @@ TEST(WASAPIAudioOutputStreamTest, CreateAndClose) {
// Test Open(), Close() calling sequence.
TEST(WASAPIAudioOutputStreamTest, OpenAndClose) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
if (!CanRunAudioTests(audio_manager.get()))
return;
AudioOutputStream* aos = CreateDefaultAudioOutputStream(audio_manager.get());
@@ -293,7 +292,7 @@ TEST(WASAPIAudioOutputStreamTest, OpenAndClose) {
// Test Open(), Start(), Close() calling sequence.
TEST(WASAPIAudioOutputStreamTest, OpenStartAndClose) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
if (!CanRunAudioTests(audio_manager.get()))
return;
AudioOutputStream* aos = CreateDefaultAudioOutputStream(audio_manager.get());
@@ -307,7 +306,7 @@ TEST(WASAPIAudioOutputStreamTest, OpenStartAndClose) {
// Test Open(), Start(), Stop(), Close() calling sequence.
TEST(WASAPIAudioOutputStreamTest, OpenStartStopAndClose) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
if (!CanRunAudioTests(audio_manager.get()))
return;
AudioOutputStream* aos = CreateDefaultAudioOutputStream(audio_manager.get());
@@ -322,7 +321,7 @@ TEST(WASAPIAudioOutputStreamTest, OpenStartStopAndClose) {
// Test SetVolume(), GetVolume()
TEST(WASAPIAudioOutputStreamTest, Volume) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
if (!CanRunAudioTests(audio_manager.get()))
return;
AudioOutputStream* aos = CreateDefaultAudioOutputStream(audio_manager.get());
@@ -359,7 +358,7 @@ TEST(WASAPIAudioOutputStreamTest, Volume) {
// Test some additional calling sequences.
TEST(WASAPIAudioOutputStreamTest, MiscCallingSequences) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
if (!CanRunAudioTests(audio_manager.get()))
return;
@@ -399,7 +398,7 @@ TEST(WASAPIAudioOutputStreamTest, MiscCallingSequences) {
// Use preferred packet size and verify that rendering starts.
TEST(WASAPIAudioOutputStreamTest, ValidPacketSize) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
if (!CanRunAudioTests(audio_manager.get()))
return;
@@ -435,7 +434,7 @@ TEST(WASAPIAudioOutputStreamTest, ValidPacketSize) {
// Use a non-preferred packet size and verify that Open() fails.
TEST(WASAPIAudioOutputStreamTest, InvalidPacketSize) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
if (!CanRunAudioTests(audio_manager.get()))
return;
@@ -462,7 +461,7 @@ TEST(WASAPIAudioOutputStreamTest, InvalidPacketSize) {
// environment variable to a value greater than 0.
// The test files are approximately 20 seconds long.
TEST(WASAPIAudioOutputStreamTest, DISABLED_ReadFromStereoFile) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
if (!CanRunAudioTests(audio_manager.get()))
return;
@@ -484,13 +483,13 @@ TEST(WASAPIAudioOutputStreamTest, DISABLED_ReadFromStereoFile) {
}
ReadFromFileAudioSource file_source(file_name);
- LOG(INFO) << "File name : " << file_name.c_str();
- LOG(INFO) << "Sample rate : " << aosw.sample_rate();
- LOG(INFO) << "Bits per sample: " << aosw.bits_per_sample();
- LOG(INFO) << "#channels : " << aosw.channels();
- LOG(INFO) << "File size : " << file_source.file_size();
- LOG(INFO) << "#file segments : " << kNumFileSegments;
- LOG(INFO) << ">> Listen to the stereo file while playing...";
+ VLOG(0) << "File name : " << file_name.c_str();
+ VLOG(0) << "Sample rate : " << aosw.sample_rate();
+ VLOG(0) << "Bits per sample: " << aosw.bits_per_sample();
+ VLOG(0) << "#channels : " << aosw.channels();
+ VLOG(0) << "File size : " << file_source.file_size();
+ VLOG(0) << "#file segments : " << kNumFileSegments;
+ VLOG(0) << ">> Listen to the stereo file while playing...";
for (int i = 0; i < kNumFileSegments; i++) {
// Each segment will start with a short (~20ms) block of zeros, hence
@@ -503,7 +502,7 @@ TEST(WASAPIAudioOutputStreamTest, DISABLED_ReadFromStereoFile) {
aos->Stop();
}
- LOG(INFO) << ">> Stereo file playout has stopped.";
+ VLOG(0) << ">> Stereo file playout has stopped.";
aos->Close();
}
@@ -515,7 +514,7 @@ TEST(WASAPIAudioOutputStreamTest, ExclusiveModeBufferSizesAt48kHz) {
if (!ExclusiveModeIsEnabled())
return;
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
if (!CanRunAudioTests(audio_manager.get()))
return;
@@ -566,7 +565,7 @@ TEST(WASAPIAudioOutputStreamTest, ExclusiveModeBufferSizesAt44kHz) {
if (!ExclusiveModeIsEnabled())
return;
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
if (!CanRunAudioTests(audio_manager.get()))
return;
@@ -624,7 +623,7 @@ TEST(WASAPIAudioOutputStreamTest, ExclusiveModeMinBufferSizeAt48kHz) {
if (!ExclusiveModeIsEnabled())
return;
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
if (!CanRunAudioTests(audio_manager.get()))
return;
@@ -665,7 +664,7 @@ TEST(WASAPIAudioOutputStreamTest, ExclusiveModeMinBufferSizeAt44kHz) {
if (!ExclusiveModeIsEnabled())
return;
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
if (!CanRunAudioTests(audio_manager.get()))
return;
diff --git a/chromium/media/audio/win/audio_manager_win.cc b/chromium/media/audio/win/audio_manager_win.cc
index 0352e6677d2..242813a8c65 100644
--- a/chromium/media/audio/win/audio_manager_win.cc
+++ b/chromium/media/audio/win/audio_manager_win.cc
@@ -20,8 +20,8 @@
#include "base/process/launch.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_util.h"
+#include "base/win/windows_version.h"
#include "media/audio/audio_parameters.h"
-#include "media/audio/audio_util.h"
#include "media/audio/win/audio_device_listener_win.h"
#include "media/audio/win/audio_low_latency_input_win.h"
#include "media/audio/win/audio_low_latency_output_win.h"
@@ -73,8 +73,8 @@ static int GetVersionPartAsInt(DWORDLONG num) {
// Returns a string containing the given device's description and installed
// driver version.
-static string16 GetDeviceAndDriverInfo(HDEVINFO device_info,
- SP_DEVINFO_DATA* device_data) {
+static base::string16 GetDeviceAndDriverInfo(HDEVINFO device_info,
+ SP_DEVINFO_DATA* device_data) {
// Save the old install params setting and set a flag for the
// SetupDiBuildDriverInfoList below to return only the installed drivers.
SP_DEVINSTALL_PARAMS old_device_install_params;
@@ -88,13 +88,13 @@ static string16 GetDeviceAndDriverInfo(HDEVINFO device_info,
SP_DRVINFO_DATA driver_data;
driver_data.cbSize = sizeof(driver_data);
- string16 device_and_driver_info;
+ base::string16 device_and_driver_info;
if (SetupDiBuildDriverInfoList(device_info, device_data,
SPDIT_COMPATDRIVER)) {
if (SetupDiEnumDriverInfo(device_info, device_data, SPDIT_COMPATDRIVER, 0,
&driver_data)) {
DWORDLONG version = driver_data.DriverVersion;
- device_and_driver_info = string16(driver_data.Description) + L" v" +
+ device_and_driver_info = base::string16(driver_data.Description) + L" v" +
base::IntToString16(GetVersionPartAsInt((version >> 48))) + L"." +
base::IntToString16(GetVersionPartAsInt((version >> 32))) + L"." +
base::IntToString16(GetVersionPartAsInt((version >> 16))) + L"." +
@@ -109,7 +109,26 @@ static string16 GetDeviceAndDriverInfo(HDEVINFO device_info,
return device_and_driver_info;
}
-AudioManagerWin::AudioManagerWin() {
+static int NumberOfWaveOutBuffers() {
+ // Use the user provided buffer count if provided.
+ int buffers = 0;
+ std::string buffers_str(CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
+ switches::kWaveOutBuffers));
+ if (base::StringToInt(buffers_str, &buffers) && buffers > 0) {
+ return buffers;
+ }
+
+ // Use 4 buffers for Vista, 3 for everyone else:
+ // - The entire Windows audio stack was rewritten for Windows Vista and wave
+ // out performance was degraded compared to XP.
+ // - The regression was fixed in Windows 7 and most configurations will work
+ // with 2, but some (e.g., some Sound Blasters) still need 3.
+ // - Some XP configurations (even multi-processor ones) also need 3.
+ return (base::win::GetVersion() == base::win::VERSION_VISTA) ? 4 : 3;
+}
+
+AudioManagerWin::AudioManagerWin(AudioLogFactory* audio_log_factory)
+ : AudioManagerBase(audio_log_factory) {
if (!CoreAudioUtil::IsSupported()) {
// Use the Wave API for device enumeration if XP or lower.
enumeration_type_ = kWaveEnumeration;
@@ -157,7 +176,7 @@ void AudioManagerWin::DestroyDeviceListener() {
output_device_listener_.reset();
}
-string16 AudioManagerWin::GetAudioInputDeviceModel() {
+base::string16 AudioManagerWin::GetAudioInputDeviceModel() {
// Get the default audio capture device and its device interface name.
DWORD device_id = 0;
waveInMessage(reinterpret_cast<HWAVEIN>(WAVE_MAPPER),
@@ -167,13 +186,13 @@ string16 AudioManagerWin::GetAudioInputDeviceModel() {
waveInMessage(reinterpret_cast<HWAVEIN>(device_id),
DRV_QUERYDEVICEINTERFACESIZE,
reinterpret_cast<DWORD_PTR>(&device_interface_name_size), 0);
- size_t bytes_in_char16 = sizeof(string16::value_type);
+ size_t bytes_in_char16 = sizeof(base::string16::value_type);
DCHECK_EQ(0u, device_interface_name_size % bytes_in_char16);
if (device_interface_name_size <= bytes_in_char16)
- return string16(); // No audio capture device.
+ return base::string16(); // No audio capture device.
- string16 device_interface_name;
- string16::value_type* name_ptr = WriteInto(&device_interface_name,
+ base::string16 device_interface_name;
+ base::string16::value_type* name_ptr = WriteInto(&device_interface_name,
device_interface_name_size / bytes_in_char16);
waveInMessage(reinterpret_cast<HWAVEIN>(device_id),
DRV_QUERYDEVICEINTERFACE,
@@ -185,7 +204,7 @@ string16 AudioManagerWin::GetAudioInputDeviceModel() {
HDEVINFO device_info = SetupDiGetClassDevs(
&AM_KSCATEGORY_AUDIO, 0, 0, DIGCF_DEVICEINTERFACE | DIGCF_PRESENT);
if (device_info == INVALID_HANDLE_VALUE)
- return string16();
+ return base::string16();
DWORD interface_index = 0;
SP_DEVICE_INTERFACE_DATA interface_data;
@@ -210,7 +229,7 @@ string16 AudioManagerWin::GetAudioInputDeviceModel() {
interface_detail,
interface_detail_size, NULL,
&device_data))
- return string16();
+ return base::string16();
bool device_found = (device_interface_name == interface_detail->DevicePath);
@@ -218,7 +237,7 @@ string16 AudioManagerWin::GetAudioInputDeviceModel() {
return GetDeviceAndDriverInfo(device_info, &device_data);
}
- return string16();
+ return base::string16();
}
void AudioManagerWin::ShowAudioInputSettings() {
@@ -337,7 +356,8 @@ AudioOutputStream* AudioManagerWin::MakeLowLatencyOutputStream(
if (!CoreAudioUtil::IsSupported()) {
// Fall back to Windows Wave implementation on Windows XP or lower.
- DLOG_IF(ERROR, !device_id.empty())
+ DLOG_IF(ERROR, !device_id.empty() &&
+ device_id != AudioManagerBase::kDefaultDeviceId)
<< "Opening by device id not supported by PCMWaveOutAudioOutputStream";
DVLOG(1) << "Using WaveOut since WASAPI requires at least Vista.";
return new PCMWaveOutAudioOutputStream(
@@ -347,12 +367,19 @@ AudioOutputStream* AudioManagerWin::MakeLowLatencyOutputStream(
// TODO(rtoy): support more than stereo input.
if (params.input_channels() > 0) {
DVLOG(1) << "WASAPIUnifiedStream is created.";
- DLOG_IF(ERROR, !device_id.empty())
+ DLOG_IF(ERROR, !device_id.empty() &&
+ device_id != AudioManagerBase::kDefaultDeviceId)
<< "Opening by device id not supported by WASAPIUnifiedStream";
return new WASAPIUnifiedStream(this, params, input_device_id);
}
- return new WASAPIAudioOutputStream(this, device_id, params, eConsole);
+ // Pass an empty string to indicate that we want the default device
+ // since we consistently only check for an empty string in
+ // WASAPIAudioOutputStream.
+ return new WASAPIAudioOutputStream(this,
+ device_id == AudioManagerBase::kDefaultDeviceId ?
+ std::string() : device_id,
+ params, eConsole);
}
// Factory for the implementations of AudioInputStream for AUDIO_PCM_LINEAR
@@ -429,21 +456,25 @@ AudioParameters AudioManagerWin::GetPreferredOutputStreamParameters(
}
if (input_params.IsValid()) {
+ // If the user has enabled checking supported channel layouts or we don't
+ // have a valid channel layout yet, try to use the input layout. See bugs
+ // http://crbug.com/259165 and http://crbug.com/311906 for more details.
if (core_audio_supported &&
- cmd_line->HasSwitch(switches::kTrySupportedChannelLayouts)) {
+ (cmd_line->HasSwitch(switches::kTrySupportedChannelLayouts) ||
+ channel_layout == CHANNEL_LAYOUT_UNSUPPORTED)) {
// Check if it is possible to open up at the specified input channel
// layout but avoid checking if the specified layout is the same as the
// hardware (preferred) layout. We do this extra check to avoid the
// CoreAudioUtil::IsChannelLayoutSupported() overhead in most cases.
if (input_params.channel_layout() != channel_layout) {
- // TODO(henrika): Use |output_device_id| here.
- // Internally, IsChannelLayoutSupported does many of the operations
- // that have already been done such as opening up a client and fetching
- // the WAVEFORMATPCMEX format. Ideally we should only do that once and
- // do it for the requested device. Then here, we can check the layout
- // from the data we already hold.
+ // TODO(henrika): Internally, IsChannelLayoutSupported does many of the
+ // operations that have already been done such as opening up a client
+ // and fetching the WAVEFORMATPCMEX format. Ideally we should only do
+ // that once. Then here, we can check the layout from the data we
+ // already hold.
if (CoreAudioUtil::IsChannelLayoutSupported(
- eRender, eConsole, input_params.channel_layout())) {
+ output_device_id, eRender, eConsole,
+ input_params.channel_layout())) {
// Open up using the same channel layout as the source if it is
// supported by the hardware.
channel_layout = input_params.channel_layout();
@@ -472,7 +503,7 @@ AudioParameters AudioManagerWin::GetPreferredOutputStreamParameters(
return AudioParameters(
AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout, input_channels,
- sample_rate, bits_per_sample, buffer_size);
+ sample_rate, bits_per_sample, buffer_size, AudioParameters::NO_EFFECTS);
}
AudioInputStream* AudioManagerWin::CreatePCMWaveInAudioInputStream(
@@ -494,8 +525,8 @@ AudioInputStream* AudioManagerWin::CreatePCMWaveInAudioInputStream(
}
/// static
-AudioManager* CreateAudioManager() {
- return new AudioManagerWin();
+AudioManager* CreateAudioManager(AudioLogFactory* audio_log_factory) {
+ return new AudioManagerWin(audio_log_factory);
}
} // namespace media
diff --git a/chromium/media/audio/win/audio_manager_win.h b/chromium/media/audio/win/audio_manager_win.h
index 86e22badc5f..01044da40a0 100644
--- a/chromium/media/audio/win/audio_manager_win.h
+++ b/chromium/media/audio/win/audio_manager_win.h
@@ -18,12 +18,12 @@ class AudioDeviceListenerWin;
// the AudioManager class.
class MEDIA_EXPORT AudioManagerWin : public AudioManagerBase {
public:
- AudioManagerWin();
+ AudioManagerWin(AudioLogFactory* audio_log_factory);
// Implementation of AudioManager.
virtual bool HasAudioOutputDevices() OVERRIDE;
virtual bool HasAudioInputDevices() OVERRIDE;
- virtual string16 GetAudioInputDeviceModel() OVERRIDE;
+ virtual base::string16 GetAudioInputDeviceModel() OVERRIDE;
virtual void ShowAudioInputSettings() OVERRIDE;
virtual void GetAudioInputDeviceNames(
AudioDeviceNames* device_names) OVERRIDE;
diff --git a/chromium/media/audio/win/audio_output_win_unittest.cc b/chromium/media/audio/win/audio_output_win_unittest.cc
index 7ce146b0ab4..2b8036d52a2 100644
--- a/chromium/media/audio/win/audio_output_win_unittest.cc
+++ b/chromium/media/audio/win/audio_output_win_unittest.cc
@@ -14,7 +14,6 @@
#include "base/win/windows_version.h"
#include "media/base/limits.h"
#include "media/audio/audio_io.h"
-#include "media/audio/audio_util.h"
#include "media/audio/audio_manager.h"
#include "media/audio/simple_sources.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -176,7 +175,7 @@ class ReadOnlyMappedFile {
// Test that can it be created and closed.
TEST(WinAudioTest, PCMWaveStreamGetAndClose) {
- scoped_ptr<AudioManager> audio_man(AudioManager::Create());
+ scoped_ptr<AudioManager> audio_man(AudioManager::CreateForTesting());
if (!audio_man->HasAudioOutputDevices()) {
LOG(WARNING) << "No output device detected.";
return;
@@ -192,7 +191,7 @@ TEST(WinAudioTest, PCMWaveStreamGetAndClose) {
// Test that can it be cannot be created with invalid parameters.
TEST(WinAudioTest, SanityOnMakeParams) {
- scoped_ptr<AudioManager> audio_man(AudioManager::Create());
+ scoped_ptr<AudioManager> audio_man(AudioManager::CreateForTesting());
if (!audio_man->HasAudioOutputDevices()) {
LOG(WARNING) << "No output device detected.";
return;
@@ -228,7 +227,7 @@ TEST(WinAudioTest, SanityOnMakeParams) {
// Test that it can be opened and closed.
TEST(WinAudioTest, PCMWaveStreamOpenAndClose) {
- scoped_ptr<AudioManager> audio_man(AudioManager::Create());
+ scoped_ptr<AudioManager> audio_man(AudioManager::CreateForTesting());
if (!audio_man->HasAudioOutputDevices()) {
LOG(WARNING) << "No output device detected.";
return;
@@ -245,7 +244,7 @@ TEST(WinAudioTest, PCMWaveStreamOpenAndClose) {
// Test that it has a maximum packet size.
TEST(WinAudioTest, PCMWaveStreamOpenLimit) {
- scoped_ptr<AudioManager> audio_man(AudioManager::Create());
+ scoped_ptr<AudioManager> audio_man(AudioManager::CreateForTesting());
if (!audio_man->HasAudioOutputDevices()) {
LOG(WARNING) << "No output device detected.";
return;
@@ -264,7 +263,7 @@ TEST(WinAudioTest, PCMWaveStreamOpenLimit) {
// time. The actual EXPECT_GT are mostly meaningless and the real test is that
// the test completes in reasonable time.
TEST(WinAudioTest, PCMWaveSlowSource) {
- scoped_ptr<AudioManager> audio_man(AudioManager::Create());
+ scoped_ptr<AudioManager> audio_man(AudioManager::CreateForTesting());
if (!audio_man->HasAudioOutputDevices()) {
LOG(WARNING) << "No output device detected.";
return;
@@ -292,7 +291,7 @@ TEST(WinAudioTest, PCMWaveSlowSource) {
// gets paused. This test is best when run over RDP with audio enabled. See
// bug 19276 for more details.
TEST(WinAudioTest, PCMWaveStreamPlaySlowLoop) {
- scoped_ptr<AudioManager> audio_man(AudioManager::Create());
+ scoped_ptr<AudioManager> audio_man(AudioManager::CreateForTesting());
if (!audio_man->HasAudioOutputDevices()) {
LOG(WARNING) << "No output device detected.";
return;
@@ -323,7 +322,7 @@ TEST(WinAudioTest, PCMWaveStreamPlaySlowLoop) {
// device at 44.1K s/sec. Parameters have been chosen carefully so you should
// not hear pops or noises while the sound is playing.
TEST(WinAudioTest, PCMWaveStreamPlay200HzTone44Kss) {
- scoped_ptr<AudioManager> audio_man(AudioManager::Create());
+ scoped_ptr<AudioManager> audio_man(AudioManager::CreateForTesting());
if (!audio_man->HasAudioOutputDevices()) {
LOG(WARNING) << "No output device detected.";
return;
@@ -351,7 +350,7 @@ TEST(WinAudioTest, PCMWaveStreamPlay200HzTone44Kss) {
// not hear pops or noises while the sound is playing. The audio also should
// sound with a lower volume than PCMWaveStreamPlay200HzTone44Kss.
TEST(WinAudioTest, PCMWaveStreamPlay200HzTone22Kss) {
- scoped_ptr<AudioManager> audio_man(AudioManager::Create());
+ scoped_ptr<AudioManager> audio_man(AudioManager::CreateForTesting());
if (!audio_man->HasAudioOutputDevices()) {
LOG(WARNING) << "No output device detected.";
return;
@@ -386,7 +385,7 @@ TEST(WinAudioTest, PCMWaveStreamPlay200HzTone22Kss) {
// try hard to generate situation where the two threads are accessing the
// object roughly at the same time.
TEST(WinAudioTest, PushSourceFile16KHz) {
- scoped_ptr<AudioManager> audio_man(AudioManager::Create());
+ scoped_ptr<AudioManager> audio_man(AudioManager::CreateForTesting());
if (!audio_man->HasAudioOutputDevices()) {
LOG(WARNING) << "No output device detected.";
return;
@@ -429,7 +428,7 @@ TEST(WinAudioTest, PushSourceFile16KHz) {
// stopped. You will here two .5 seconds wave signal separated by 0.5 seconds
// of silence.
TEST(WinAudioTest, PCMWaveStreamPlayTwice200HzTone44Kss) {
- scoped_ptr<AudioManager> audio_man(AudioManager::Create());
+ scoped_ptr<AudioManager> audio_man(AudioManager::CreateForTesting());
if (!audio_man->HasAudioOutputDevices()) {
LOG(WARNING) << "No output device detected.";
return;
@@ -466,7 +465,7 @@ TEST(WinAudioTest, PCMWaveStreamPlayTwice200HzTone44Kss) {
// higher and Wave is used for XP and lower. It is possible to utilize a
// smaller buffer size for WASAPI than for Wave.
TEST(WinAudioTest, PCMWaveStreamPlay200HzToneLowLatency) {
- scoped_ptr<AudioManager> audio_man(AudioManager::Create());
+ scoped_ptr<AudioManager> audio_man(AudioManager::CreateForTesting());
if (!audio_man->HasAudioOutputDevices()) {
LOG(WARNING) << "No output device detected.";
return;
@@ -510,7 +509,7 @@ TEST(WinAudioTest, PCMWaveStreamPlay200HzToneLowLatency) {
// Check that the pending bytes value is correct what the stream starts.
TEST(WinAudioTest, PCMWaveStreamPendingBytes) {
- scoped_ptr<AudioManager> audio_man(AudioManager::Create());
+ scoped_ptr<AudioManager> audio_man(AudioManager::CreateForTesting());
if (!audio_man->HasAudioOutputDevices()) {
LOG(WARNING) << "No output device detected.";
return;
@@ -539,28 +538,22 @@ TEST(WinAudioTest, PCMWaveStreamPendingBytes) {
EXPECT_CALL(source, OnMoreData(NotNull(),
Field(&AudioBuffersState::pending_bytes, 0)))
.WillOnce(Invoke(MockAudioSource::ClearData));
- switch (NumberOfWaveOutBuffers()) {
- case 2:
- break; // Calls are the same as at end of 3-buffer scheme.
- case 3:
- EXPECT_CALL(source, OnMoreData(NotNull(),
- Field(&AudioBuffersState::pending_bytes,
- bytes_100_ms)))
- .WillOnce(Invoke(MockAudioSource::ClearData));
- EXPECT_CALL(source, OnMoreData(NotNull(),
- Field(&AudioBuffersState::pending_bytes,
- 2 * bytes_100_ms)))
- .WillOnce(Invoke(MockAudioSource::ClearData));
- EXPECT_CALL(source, OnMoreData(NotNull(),
- Field(&AudioBuffersState::pending_bytes,
- 2 * bytes_100_ms)))
- .Times(AnyNumber())
- .WillRepeatedly(Return(0));
- break;
- default:
- ASSERT_TRUE(false)
- << "Unexpected number of buffers: " << NumberOfWaveOutBuffers();
- }
+
+ // Note: If AudioManagerWin::NumberOfWaveOutBuffers() ever changes, or if this
+ // test is run on Vista, these expectations will fail.
+ EXPECT_CALL(source, OnMoreData(NotNull(),
+ Field(&AudioBuffersState::pending_bytes,
+ bytes_100_ms)))
+ .WillOnce(Invoke(MockAudioSource::ClearData));
+ EXPECT_CALL(source, OnMoreData(NotNull(),
+ Field(&AudioBuffersState::pending_bytes,
+ 2 * bytes_100_ms)))
+ .WillOnce(Invoke(MockAudioSource::ClearData));
+ EXPECT_CALL(source, OnMoreData(NotNull(),
+ Field(&AudioBuffersState::pending_bytes,
+ 2 * bytes_100_ms)))
+ .Times(AnyNumber())
+ .WillRepeatedly(Return(0));
EXPECT_CALL(source, OnMoreData(NotNull(),
Field(&AudioBuffersState::pending_bytes,
bytes_100_ms)))
@@ -667,7 +660,7 @@ DWORD __stdcall SyncSocketThread(void* context) {
// related to the two different audio-layers for AUDIO_PCM_LOW_LATENCY.
// In this test you should hear a continuous 200Hz tone for 2 seconds.
TEST(WinAudioTest, SyncSocketBasic) {
- scoped_ptr<AudioManager> audio_man(AudioManager::Create());
+ scoped_ptr<AudioManager> audio_man(AudioManager::CreateForTesting());
if (!audio_man->HasAudioOutputDevices()) {
LOG(WARNING) << "No output device detected.";
return;
diff --git a/chromium/media/audio/win/audio_unified_win.cc b/chromium/media/audio/win/audio_unified_win.cc
index 5c1594ef8f8..901c8b897fa 100644
--- a/chromium/media/audio/win/audio_unified_win.cc
+++ b/chromium/media/audio/win/audio_unified_win.cc
@@ -51,23 +51,6 @@ static const char kUnifiedAudioDebugFileName[] = "unified_win_debug.txt";
static const char kUnifiedAudioParamsFileName[] = "unified_win_params.txt";
#endif
-typedef uint32 ChannelConfig;
-
-// Retrieves an integer mask which corresponds to the channel layout the
-// audio engine uses for its internal processing/mixing of shared-mode
-// streams. This mask indicates which channels are present in the multi-
-// channel stream. The least significant bit corresponds with the Front Left
-// speaker, the next least significant bit corresponds to the Front Right
-// speaker, and so on, continuing in the order defined in KsMedia.h.
-// See http://msdn.microsoft.com/en-us/library/windows/hardware/ff537083(v=vs.85).aspx
-// for more details.
-static ChannelConfig GetChannelConfig(EDataFlow data_flow) {
- WAVEFORMATPCMEX format;
- return SUCCEEDED(media::CoreAudioUtil::GetDefaultSharedModeMixFormat(
- data_flow, eConsole, &format)) ?
- static_cast<int>(format.dwChannelMask) : 0;
-}
-
// Use the acquired IAudioClock interface to derive a time stamp of the audio
// sample which is currently playing through the speakers.
static double SpeakerStreamPosInMilliseconds(IAudioClock* clock) {
@@ -178,7 +161,7 @@ WASAPIUnifiedStream::~WASAPIUnifiedStream() {
base::FilePath data_file_name;
PathService::Get(base::DIR_EXE, &data_file_name);
data_file_name = data_file_name.AppendASCII(kUnifiedAudioDebugFileName);
- data_file_ = file_util::OpenFile(data_file_name, "wt");
+ data_file_ = base::OpenFile(data_file_name, "wt");
DVLOG(1) << ">> Output file " << data_file_name.value() << " is created.";
size_t n = 0;
@@ -192,16 +175,16 @@ WASAPIUnifiedStream::~WASAPIUnifiedStream() {
fifo_rate_comps_[n]);
++n;
}
- file_util::CloseFile(data_file_);
+ base::CloseFile(data_file_);
base::FilePath param_file_name;
PathService::Get(base::DIR_EXE, &param_file_name);
param_file_name = param_file_name.AppendASCII(kUnifiedAudioParamsFileName);
- param_file_ = file_util::OpenFile(param_file_name, "wt");
+ param_file_ = base::OpenFile(param_file_name, "wt");
DVLOG(1) << ">> Output file " << param_file_name.value() << " is created.";
fprintf(param_file_, "%d %d\n", input_params_[0], input_params_[1]);
fprintf(param_file_, "%d %d\n", output_params_[0], output_params_[1]);
- file_util::CloseFile(param_file_);
+ base::CloseFile(param_file_);
#endif
}
@@ -575,8 +558,9 @@ void WASAPIUnifiedStream::SetIOFormats(const AudioParameters& input_params,
// Add the parts which are unique to WAVE_FORMAT_EXTENSIBLE.
// Note that we always open up using the native channel layout.
(*xformat).Samples.wValidBitsPerSample = format->wBitsPerSample;
- (*xformat).dwChannelMask = (n == 0) ?
- GetChannelConfig(eCapture) : GetChannelConfig(eRender);
+ (*xformat).dwChannelMask =
+ CoreAudioUtil::GetChannelConfig(
+ std::string(), n == 0 ? eCapture : eRender);
(*xformat).SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
}
diff --git a/chromium/media/audio/win/audio_unified_win_unittest.cc b/chromium/media/audio/win/audio_unified_win_unittest.cc
index 011c36348b5..15573aec76a 100644
--- a/chromium/media/audio/win/audio_unified_win_unittest.cc
+++ b/chromium/media/audio/win/audio_unified_win_unittest.cc
@@ -12,7 +12,6 @@
#include "base/win/scoped_com_initializer.h"
#include "media/audio/audio_io.h"
#include "media/audio/audio_manager.h"
-#include "media/audio/audio_util.h"
#include "media/audio/win/audio_unified_win.h"
#include "media/audio/win/core_audio_util_win.h"
#include "media/base/channel_mixer.h"
@@ -75,9 +74,9 @@ class UnifiedSourceCallback : public AudioOutputStream::AudioSourceCallback {
file_name = file_name.AppendASCII(kDeltaTimeMsFileName);
EXPECT_TRUE(!text_file_);
- text_file_ = file_util::OpenFile(file_name, "wt");
+ text_file_ = base::OpenFile(file_name, "wt");
DLOG_IF(ERROR, !text_file_) << "Failed to open log file.";
- LOG(INFO) << ">> Output file " << file_name.value() << " has been created.";
+ VLOG(0) << ">> Output file " << file_name.value() << " has been created.";
// Write the array which contains delta times to a text file.
size_t elements_written = 0;
@@ -85,7 +84,7 @@ class UnifiedSourceCallback : public AudioOutputStream::AudioSourceCallback {
fprintf(text_file_, "%d\n", delta_times_[elements_written]);
++elements_written;
}
- file_util::CloseFile(text_file_);
+ base::CloseFile(text_file_);
}
virtual int OnMoreData(AudioBus* dest,
@@ -264,7 +263,7 @@ static WASAPIUnifiedStream* CreateDefaultUnifiedStream(
// Test Open(), Close() calling sequence.
TEST(WASAPIUnifiedStreamTest, OpenAndClose) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
if (!CanRunUnifiedAudioTests(audio_manager.get()))
return;
@@ -275,7 +274,7 @@ TEST(WASAPIUnifiedStreamTest, OpenAndClose) {
// Test Open(), Close() calling sequence for all available capture devices.
TEST(WASAPIUnifiedStreamTest, OpenAndCloseForAllInputDevices) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
if (!CanRunUnifiedAudioTests(audio_manager.get()))
return;
@@ -292,7 +291,7 @@ TEST(WASAPIUnifiedStreamTest, OpenAndCloseForAllInputDevices) {
// Test Open(), Start(), Close() calling sequence.
TEST(WASAPIUnifiedStreamTest, OpenStartAndClose) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
if (!CanRunUnifiedAudioTests(audio_manager.get()))
return;
@@ -312,7 +311,7 @@ TEST(WASAPIUnifiedStreamTest, OpenStartAndClose) {
// Verify that IO callbacks starts as they should.
TEST(WASAPIUnifiedStreamTest, StartLoopbackAudio) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
if (!CanRunUnifiedAudioTests(audio_manager.get()))
return;
@@ -348,7 +347,7 @@ TEST(WASAPIUnifiedStreamTest, StartLoopbackAudio) {
// back to the speaker. This test allows the user to verify that the audio
// sounds OK. A text file with name |kDeltaTimeMsFileName| is also generated.
TEST(WASAPIUnifiedStreamTest, DISABLED_RealTimePlayThrough) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
if (!CanRunUnifiedAudioTests(audio_manager.get()))
return;
diff --git a/chromium/media/audio/win/core_audio_util_win.cc b/chromium/media/audio/win/core_audio_util_win.cc
index 4adfdda090a..790b2b140f7 100644
--- a/chromium/media/audio/win/core_audio_util_win.cc
+++ b/chromium/media/audio/win/core_audio_util_win.cc
@@ -25,8 +25,6 @@ namespace media {
enum { KSAUDIO_SPEAKER_UNSUPPORTED = 0 };
-typedef uint32 ChannelConfig;
-
// Converts Microsoft's channel configuration to ChannelLayout.
// This mapping is not perfect but the best we can do given the current
// ChannelLayout enumerator and the Windows-specific speaker configurations
@@ -401,7 +399,7 @@ std::string CoreAudioUtil::GetMatchingOutputDeviceID(
ScopedComPtr<IMMDevice> output_device;
for (UINT i = 0; i < count; ++i) {
collection->Item(i, output_device.Receive());
- std::string output_controller_id(CoreAudioUtil::GetAudioControllerID(
+ std::string output_controller_id(GetAudioControllerID(
output_device, enumerator));
if (output_controller_id == controller_id)
break;
@@ -478,6 +476,18 @@ ScopedComPtr<IAudioClient> CoreAudioUtil::CreateDefaultClient(
ScopedComPtr<IAudioClient>());
}
+ScopedComPtr<IAudioClient> CoreAudioUtil::CreateClient(
+ const std::string& device_id, EDataFlow data_flow, ERole role) {
+ if (device_id.empty())
+ return CreateDefaultClient(data_flow, role);
+
+ ScopedComPtr<IMMDevice> device(CreateDevice(device_id));
+ if (!device)
+ return ScopedComPtr<IAudioClient>();
+
+ return CreateClient(device);
+}
+
HRESULT CoreAudioUtil::GetSharedModeMixFormat(
IAudioClient* client, WAVEFORMATPCMEX* format) {
DCHECK(IsSupported());
@@ -496,18 +506,6 @@ HRESULT CoreAudioUtil::GetSharedModeMixFormat(
return hr;
}
-HRESULT CoreAudioUtil::GetDefaultSharedModeMixFormat(
- EDataFlow data_flow, ERole role, WAVEFORMATPCMEX* format) {
- DCHECK(IsSupported());
- ScopedComPtr<IAudioClient> client(CreateDefaultClient(data_flow, role));
- if (!client) {
- // Map NULL-pointer to new error code which can be different from the
- // actual error code. The exact value is not important here.
- return AUDCLNT_E_ENDPOINT_CREATE_FAILED;
- }
- return CoreAudioUtil::GetSharedModeMixFormat(client, format);
-}
-
bool CoreAudioUtil::IsFormatSupported(IAudioClient* client,
AUDCLNT_SHAREMODE share_mode,
const WAVEFORMATPCMEX* format) {
@@ -529,18 +527,20 @@ bool CoreAudioUtil::IsFormatSupported(IAudioClient* client,
return (hr == S_OK);
}
-bool CoreAudioUtil::IsChannelLayoutSupported(EDataFlow data_flow, ERole role,
+bool CoreAudioUtil::IsChannelLayoutSupported(const std::string& device_id,
+ EDataFlow data_flow,
+ ERole role,
ChannelLayout channel_layout) {
DCHECK(IsSupported());
// First, get the preferred mixing format for shared mode streams.
- ScopedComPtr<IAudioClient> client(CreateDefaultClient(data_flow, role));
+ ScopedComPtr<IAudioClient> client(CreateClient(device_id, data_flow, role));
if (!client)
return false;
WAVEFORMATPCMEX format;
- HRESULT hr = CoreAudioUtil::GetSharedModeMixFormat(client, &format);
+ HRESULT hr = GetSharedModeMixFormat(client, &format);
if (FAILED(hr))
return false;
@@ -623,6 +623,16 @@ HRESULT CoreAudioUtil::GetPreferredAudioParameters(
// Convert Microsoft's channel configuration to genric ChannelLayout.
ChannelLayout channel_layout = ChannelConfigToChannelLayout(channel_config);
+ // Some devices don't appear to set a valid channel layout, so guess based on
+ // the number of channels. See http://crbug.com/311906.
+ if (channel_layout == CHANNEL_LAYOUT_UNSUPPORTED) {
+ VLOG(1) << "Unsupported channel config: "
+ << std::hex << channel_config
+ << ". Guessing layout by channel count: "
+ << std::dec << mix_format.Format.nChannels;
+ channel_layout = GuessChannelLayout(mix_format.Format.nChannels);
+ }
+
// Preferred sample rate.
int sample_rate = mix_format.Format.nSamplesPerSec;
@@ -684,6 +694,18 @@ HRESULT CoreAudioUtil::GetPreferredAudioParameters(
return GetPreferredAudioParameters(client, params);
}
+ChannelConfig CoreAudioUtil::GetChannelConfig(const std::string& device_id,
+ EDataFlow data_flow) {
+ ScopedComPtr<IAudioClient> client(
+ CreateClient(device_id, data_flow, eConsole));
+
+ WAVEFORMATPCMEX format = {0};
+ if (!client || FAILED(GetSharedModeMixFormat(client, &format)))
+ return 0;
+
+ return static_cast<ChannelConfig>(format.dwChannelMask);
+}
+
HRESULT CoreAudioUtil::SharedModeInitialize(IAudioClient* client,
const WAVEFORMATPCMEX* format,
HANDLE event_handle,
diff --git a/chromium/media/audio/win/core_audio_util_win.h b/chromium/media/audio/win/core_audio_util_win.h
index cdf6dfb11df..a210af906ea 100644
--- a/chromium/media/audio/win/core_audio_util_win.h
+++ b/chromium/media/audio/win/core_audio_util_win.h
@@ -26,6 +26,12 @@ using base::win::ScopedComPtr;
namespace media {
+
+// Represents audio channel configuration constants as understood by Windows.
+// E.g. KSAUDIO_SPEAKER_MONO. For a list of possible values see:
+// http://msdn.microsoft.com/en-us/library/windows/hardware/ff537083(v=vs.85).aspx
+typedef uint32 ChannelConfig;
+
class MEDIA_EXPORT CoreAudioUtil {
public:
// Returns true if Windows Core Audio is supported.
@@ -106,7 +112,7 @@ class MEDIA_EXPORT CoreAudioUtil {
// manage the flow of audio data between the application and an audio endpoint
// device.
- // Create an IAudioClient interface for the default IMMDevice where
+ // Create an IAudioClient instance for the default IMMDevice where
// flow direction and role is define by |data_flow| and |role|.
// The IAudioClient interface enables a client to create and initialize an
// audio stream between an audio application and the audio engine (for a
@@ -115,6 +121,12 @@ class MEDIA_EXPORT CoreAudioUtil {
static ScopedComPtr<IAudioClient> CreateDefaultClient(EDataFlow data_flow,
ERole role);
+ // Create an IAudioClient instance for a specific device _or_ the default
+ // device if |device_id| is empty.
+ static ScopedComPtr<IAudioClient> CreateClient(const std::string& device_id,
+ EDataFlow data_flow,
+ ERole role);
+
// Create an IAudioClient interface for an existing IMMDevice given by
// |audio_device|. Flow direction and role is define by the |audio_device|.
static ScopedComPtr<IAudioClient> CreateClient(IMMDevice* audio_device);
@@ -126,13 +138,6 @@ class MEDIA_EXPORT CoreAudioUtil {
static HRESULT GetSharedModeMixFormat(IAudioClient* client,
WAVEFORMATPCMEX* format);
- // Get the mix format that the audio engine uses internally for processing
- // of shared-mode streams using the default IMMDevice where flow direction
- // and role is define by |data_flow| and |role|.
- static HRESULT GetDefaultSharedModeMixFormat(EDataFlow data_flow,
- ERole role,
- WAVEFORMATPCMEX* format);
-
// Returns true if the specified |client| supports the format in |format|
// for the given |share_mode| (shared or exclusive).
static bool IsFormatSupported(IAudioClient* client,
@@ -144,7 +149,9 @@ class MEDIA_EXPORT CoreAudioUtil {
// and |role|. If this method returns true for a certain channel layout, it
// means that SharedModeInitialize() will succeed using a format based on
// the preferred format where the channel layout has been modified.
- static bool IsChannelLayoutSupported(EDataFlow data_flow, ERole role,
+ static bool IsChannelLayoutSupported(const std::string& device_id,
+ EDataFlow data_flow,
+ ERole role,
ChannelLayout channel_layout);
// For a shared-mode stream, the audio engine periodically processes the
@@ -170,6 +177,19 @@ class MEDIA_EXPORT CoreAudioUtil {
static HRESULT GetPreferredAudioParameters(const std::string& device_id,
AudioParameters* params);
+ // Retrieves an integer mask which corresponds to the channel layout the
+ // audio engine uses for its internal processing/mixing of shared-mode
+ // streams. This mask indicates which channels are present in the multi-
+ // channel stream. The least significant bit corresponds with the Front Left
+ // speaker, the next least significant bit corresponds to the Front Right
+ // speaker, and so on, continuing in the order defined in KsMedia.h.
+ // See http://msdn.microsoft.com/en-us/library/windows/hardware/ff537083(v=vs.85).aspx
+ // for more details.
+ // To get the channel config of the default device, pass an empty string
+ // for |device_id|.
+ static ChannelConfig GetChannelConfig(const std::string& device_id,
+ EDataFlow data_flow);
+
// After activating an IAudioClient interface on an audio endpoint device,
// the client must initialize it once, and only once, to initialize the audio
// stream between the client and the device. In shared mode, the client
diff --git a/chromium/media/audio/win/core_audio_util_win_unittest.cc b/chromium/media/audio/win/core_audio_util_win_unittest.cc
index abef8682020..e9ed0c4f597 100644
--- a/chromium/media/audio/win/core_audio_util_win_unittest.cc
+++ b/chromium/media/audio/win/core_audio_util_win_unittest.cc
@@ -274,7 +274,7 @@ TEST_F(CoreAudioUtilWinTest, IsChannelLayoutSupported) {
EXPECT_TRUE(SUCCEEDED(hr));
EXPECT_TRUE(mix_params.IsValid());
EXPECT_TRUE(CoreAudioUtil::IsChannelLayoutSupported(
- eRender, eConsole, mix_params.channel_layout()));
+ std::string(), eRender, eConsole, mix_params.channel_layout()));
// Check if it is possible to modify the channel layout to stereo for a
// device which reports that it prefers to be openen up in an other
@@ -284,7 +284,7 @@ TEST_F(CoreAudioUtilWinTest, IsChannelLayoutSupported) {
// TODO(henrika): it might be too pessimistic to assume false as return
// value here.
EXPECT_FALSE(CoreAudioUtil::IsChannelLayoutSupported(
- eRender, eConsole, channel_layout));
+ std::string(), eRender, eConsole, channel_layout));
}
}
diff --git a/chromium/media/audio/win/wavein_input_win.cc b/chromium/media/audio/win/wavein_input_win.cc
index 3c4147738df..05771250e01 100644
--- a/chromium/media/audio/win/wavein_input_win.cc
+++ b/chromium/media/audio/win/wavein_input_win.cc
@@ -8,7 +8,6 @@
#include "base/logging.h"
#include "media/audio/audio_io.h"
-#include "media/audio/audio_util.h"
#include "media/audio/win/audio_manager_win.h"
#include "media/audio/win/device_enumeration_win.h"
diff --git a/chromium/media/audio/win/waveout_output_win.cc b/chromium/media/audio/win/waveout_output_win.cc
index 47d4fa65053..0f54817b14a 100644
--- a/chromium/media/audio/win/waveout_output_win.cc
+++ b/chromium/media/audio/win/waveout_output_win.cc
@@ -248,39 +248,25 @@ void PCMWaveOutAudioOutputStream::Stop() {
state_ = PCMA_STOPPING;
base::subtle::MemoryBarrier();
- // Stop watching for buffer event, wait till all the callbacks are complete.
- // Should be done before ::waveOutReset() call to avoid race condition when
- // callback that is currently active and already checked that stream is still
- // being played calls ::waveOutWrite() after ::waveOutReset() returns, later
- // causing ::waveOutClose() to fail with WAVERR_STILLPLAYING.
- // TODO(enal): that delays actual stopping of playback. Alternative can be
- // to call ::waveOutReset() twice, once before
- // ::UnregisterWaitEx() and once after.
+ // Stop watching for buffer event, waits until outstanding callbacks finish.
if (waiting_handle_) {
- if (!::UnregisterWaitEx(waiting_handle_, INVALID_HANDLE_VALUE)) {
- state_ = PCMA_PLAYING;
- HandleError(MMSYSERR_ERROR);
- return;
- }
+ if (!::UnregisterWaitEx(waiting_handle_, INVALID_HANDLE_VALUE))
+ HandleError(::GetLastError());
waiting_handle_ = NULL;
}
// Stop playback.
MMRESULT res = ::waveOutReset(waveout_);
- if (res != MMSYSERR_NOERROR) {
- state_ = PCMA_PLAYING;
+ if (res != MMSYSERR_NOERROR)
HandleError(res);
- return;
- }
// Wait for lock to ensure all outstanding callbacks have completed.
base::AutoLock auto_lock(lock_);
// waveOutReset() leaves buffers in the unpredictable state, causing
// problems if we want to close, release, or reuse them. Fix the states.
- for (int ix = 0; ix != num_buffers_; ++ix) {
+ for (int ix = 0; ix != num_buffers_; ++ix)
GetBuffer(ix)->dwFlags = WHDR_PREPARED;
- }
// Don't use callback after Stop().
callback_ = NULL;
diff --git a/chromium/media/base/android/audio_decoder_job.cc b/chromium/media/base/android/audio_decoder_job.cc
index 2ac7c0389cb..d089796ccc6 100644
--- a/chromium/media/base/android/audio_decoder_job.cc
+++ b/chromium/media/base/android/audio_decoder_job.cc
@@ -58,16 +58,16 @@ void AudioDecoderJob::SetVolume(double volume) {
}
void AudioDecoderJob::ReleaseOutputBuffer(
- int outputBufferIndex, size_t size,
- const base::TimeDelta& presentation_timestamp,
- const MediaDecoderJob::DecoderCallback& callback,
- MediaCodecStatus status) {
- audio_codec_bridge_->PlayOutputBuffer(outputBufferIndex, size);
+ int output_buffer_index,
+ size_t size,
+ bool render_output,
+ const ReleaseOutputCompletionCallback& callback) {
+ size_t size_to_render = render_output ? size : 0u;
+ if (size_to_render)
+ audio_codec_bridge_->PlayOutputBuffer(output_buffer_index, size_to_render);
+ audio_codec_bridge_->ReleaseOutputBuffer(output_buffer_index, false);
- if (status != MEDIA_CODEC_OUTPUT_END_OF_STREAM || size != 0u)
- audio_codec_bridge_->ReleaseOutputBuffer(outputBufferIndex, false);
-
- callback.Run(status, presentation_timestamp, size);
+ callback.Run(size_to_render);
}
bool AudioDecoderJob::ComputeTimeToRender() const {
diff --git a/chromium/media/base/android/audio_decoder_job.h b/chromium/media/base/android/audio_decoder_job.h
index 6ad8c28e25b..3d1b21f4b4a 100644
--- a/chromium/media/base/android/audio_decoder_job.h
+++ b/chromium/media/base/android/audio_decoder_job.h
@@ -40,10 +40,10 @@ class AudioDecoderJob : public MediaDecoderJob {
// MediaDecoderJob implementation.
virtual void ReleaseOutputBuffer(
- int outputBufferIndex, size_t size,
- const base::TimeDelta& presentation_timestamp,
- const MediaDecoderJob::DecoderCallback& callback,
- MediaCodecStatus status) OVERRIDE;
+ int output_buffer_index,
+ size_t size,
+ bool render_output,
+ const ReleaseOutputCompletionCallback& callback) OVERRIDE;
virtual bool ComputeTimeToRender() const OVERRIDE;
diff --git a/chromium/media/base/android/demuxer_android.h b/chromium/media/base/android/demuxer_android.h
index 33902db728d..865dc9d33f2 100644
--- a/chromium/media/base/android/demuxer_android.h
+++ b/chromium/media/base/android/demuxer_android.h
@@ -16,36 +16,29 @@ class DemuxerAndroidClient;
struct DemuxerConfigs;
struct DemuxerData;
-// Defines a demuxer with ID-based asynchronous operations.
-//
-// TODO(scherkus): Remove |demuxer_client_id| and Add/RemoveDemuxerClient().
-// It's required in the interim as the Android Media Source implementation uses
-// the MediaPlayerAndroid interface and associated IPC messages.
+// Defines a demuxer with asynchronous operations.
class MEDIA_EXPORT DemuxerAndroid {
public:
- // Associates |client| with the demuxer using |demuxer_client_id| as the
- // identifier. Must be called prior to calling any other methods.
- virtual void AddDemuxerClient(int demuxer_client_id,
- DemuxerAndroidClient* client) = 0;
+ virtual ~DemuxerAndroid() {}
- // Removes the association created by AddClient(). Must be called when the
- // client no longer wants to receive updates.
- virtual void RemoveDemuxerClient(int demuxer_client_id) = 0;
+ // Initializes this demuxer with |client| as the callback handler.
+ // Must be called prior to calling any other methods.
+ virtual void Initialize(DemuxerAndroidClient* client) = 0;
// Called to request the current audio/video decoder configurations.
- virtual void RequestDemuxerConfigs(int demuxer_client_id) = 0;
+ virtual void RequestDemuxerConfigs() = 0;
- // Called to request additiona data from the demuxer.
- virtual void RequestDemuxerData(int demuxer_client_id,
- media::DemuxerStream::Type type) = 0;
+ // Called to request additional data from the demuxer.
+ virtual void RequestDemuxerData(media::DemuxerStream::Type type) = 0;
// Called to request the demuxer to seek to a particular media time.
- virtual void RequestDemuxerSeek(int demuxer_client_id,
- base::TimeDelta time_to_seek,
- unsigned seek_request_id) = 0;
-
- protected:
- virtual ~DemuxerAndroid() {}
+ // |is_browser_seek| is true if the renderer is not previously expecting this
+ // seek and must coordinate with other regular seeks. Browser seek existence
+ // should be hidden as much as possible from the renderer player and web apps.
+ // TODO(wolenetz): Instead of doing browser seek, replay cached data since
+ // last keyframe. See http://crbug.com/304234.
+ virtual void RequestDemuxerSeek(const base::TimeDelta& time_to_seek,
+ bool is_browser_seek) = 0;
};
// Defines the client callback interface.
@@ -63,7 +56,13 @@ class MEDIA_EXPORT DemuxerAndroidClient {
virtual void OnDemuxerDataAvailable(const DemuxerData& params) = 0;
// Called in response to RequestDemuxerSeek().
- virtual void OnDemuxerSeeked(unsigned seek_request_id) = 0;
+ // If this is in response to a request with |is_browser_seek| set to true,
+ // then |actual_browser_seek_time| may differ from the requested
+ // |time_to_seek|, and reflects the actual time seeked to by the demuxer.
+ // For regular demuxer seeks, |actual_browser_seek_time| is kNoTimestamp() and
+ // should be ignored by browser player.
+ virtual void OnDemuxerSeekDone(
+ const base::TimeDelta& actual_browser_seek_time) = 0;
// Called whenever the demuxer has detected a duration change.
virtual void OnDemuxerDurationChanged(base::TimeDelta duration) = 0;
diff --git a/chromium/media/base/android/media_codec_bridge.cc b/chromium/media/base/android/media_codec_bridge.cc
index a029e209805..6e7987fd055 100644
--- a/chromium/media/base/android/media_codec_bridge.cc
+++ b/chromium/media/base/android/media_codec_bridge.cc
@@ -15,6 +15,7 @@
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/safe_numerics.h"
+#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "jni/MediaCodecBridge_jni.h"
#include "media/base/bit_reader.h"
@@ -27,9 +28,13 @@ using base::android::ScopedJavaLocalRef;
namespace media {
-enum { kBufferFlagEndOfStream = 4 };
+enum {
+ kBufferFlagSyncFrame = 1, // BUFFER_FLAG_SYNC_FRAME
+ kBufferFlagEndOfStream = 4, // BUFFER_FLAG_END_OF_STREAM
+ kConfigureFlagEncode = 1, // CONFIGURE_FLAG_ENCODE
+};
-static const std::string AudioCodecToAndroidMimeType(const AudioCodec codec) {
+static const std::string AudioCodecToAndroidMimeType(const AudioCodec& codec) {
switch (codec) {
case kCodecMP3:
return "audio/mpeg";
@@ -42,12 +47,14 @@ static const std::string AudioCodecToAndroidMimeType(const AudioCodec codec) {
}
}
-static const std::string VideoCodecToAndroidMimeType(const VideoCodec codec) {
+static const std::string VideoCodecToAndroidMimeType(const VideoCodec& codec) {
switch (codec) {
case kCodecH264:
return "video/avc";
case kCodecVP8:
return "video/x-vnd.on2.vp8";
+ case kCodecVP9:
+ return "video/x-vnd.on2.vp9";
default:
return std::string();
}
@@ -61,6 +68,8 @@ static const std::string CodecTypeToAndroidMimeType(const std::string& codec) {
return "audio/mp4a-latm";
if (codec == "vp8" || codec == "vp8.0")
return "video/x-vnd.on2.vp8";
+ if (codec == "vp9" || codec == "vp9.0")
+ return "video/x-vnd.on2.vp9";
if (codec == "vorbis")
return "audio/vorbis";
return std::string();
@@ -85,8 +94,8 @@ static const std::string AndroidMimeTypeToCodecType(const std::string& mime) {
return std::string();
}
-static ScopedJavaLocalRef<jintArray> ToJavaIntArray(
- JNIEnv* env, scoped_ptr<jint[]> native_array, int size) {
+static ScopedJavaLocalRef<jintArray>
+ToJavaIntArray(JNIEnv* env, scoped_ptr<jint[]> native_array, int size) {
ScopedJavaLocalRef<jintArray> j_array(env, env->NewIntArray(size));
env->SetIntArrayRegion(j_array.obj(), 0, size, native_array.get());
return j_array;
@@ -99,13 +108,20 @@ bool MediaCodecBridge::IsAvailable() {
}
// static
-void MediaCodecBridge::GetCodecsInfo(
- std::vector<CodecsInfo>* codecs_info) {
+bool MediaCodecBridge::SupportsSetParameters() {
+ // MediaCodec.setParameters() is only available starting with K.
+ return base::android::BuildInfo::GetInstance()->sdk_int() >= 19;
+}
+
+// static
+std::vector<MediaCodecBridge::CodecsInfo> MediaCodecBridge::GetCodecsInfo() {
+ std::vector<CodecsInfo> codecs_info;
JNIEnv* env = AttachCurrentThread();
if (!IsAvailable())
- return;
+ return codecs_info;
std::string mime_type;
+ std::string codec_name;
ScopedJavaLocalRef<jobjectArray> j_codec_info_array =
Java_MediaCodecBridge_getCodecsInfo(env);
jsize len = env->GetArrayLength(j_codec_info_array.obj());
@@ -115,12 +131,16 @@ void MediaCodecBridge::GetCodecsInfo(
ScopedJavaLocalRef<jstring> j_codec_type =
Java_CodecInfo_codecType(env, j_info.obj());
ConvertJavaStringToUTF8(env, j_codec_type.obj(), &mime_type);
+ ScopedJavaLocalRef<jstring> j_codec_name =
+ Java_CodecInfo_codecName(env, j_info.obj());
CodecsInfo info;
info.codecs = AndroidMimeTypeToCodecType(mime_type);
- info.secure_decoder_supported =
- Java_CodecInfo_isSecureDecoderSupported(env, j_info.obj());
- codecs_info->push_back(info);
+ ConvertJavaStringToUTF8(env, j_codec_name.obj(), &info.name);
+ info.direction = static_cast<MediaCodecDirection>(
+ Java_CodecInfo_direction(env, j_info.obj()));
+ codecs_info.push_back(info);
}
+ return codecs_info;
}
// static
@@ -131,7 +151,7 @@ bool MediaCodecBridge::CanDecode(const std::string& codec, bool is_secure) {
return false;
ScopedJavaLocalRef<jstring> j_mime = ConvertUTF8ToJavaString(env, mime);
ScopedJavaLocalRef<jobject> j_media_codec_bridge =
- Java_MediaCodecBridge_create(env, j_mime.obj(), is_secure);
+ Java_MediaCodecBridge_create(env, j_mime.obj(), is_secure, false);
if (!j_media_codec_bridge.is_null()) {
Java_MediaCodecBridge_release(env, j_media_codec_bridge.obj());
return true;
@@ -139,13 +159,33 @@ bool MediaCodecBridge::CanDecode(const std::string& codec, bool is_secure) {
return false;
}
-MediaCodecBridge::MediaCodecBridge(const std::string& mime, bool is_secure) {
+// static
+bool MediaCodecBridge::IsKnownUnaccelerated(const std::string& mime_type,
+ MediaCodecDirection direction) {
+ std::string codec_type = AndroidMimeTypeToCodecType(mime_type);
+ std::vector<media::MediaCodecBridge::CodecsInfo> codecs_info =
+ MediaCodecBridge::GetCodecsInfo();
+ for (size_t i = 0; i < codecs_info.size(); ++i) {
+ if (codecs_info[i].codecs == codec_type &&
+ codecs_info[i].direction == direction) {
+ // It would be nice if MediaCodecInfo externalized some notion of
+ // HW-acceleration but it doesn't. Android Media guidance is that the
+ // prefix below is always used for SW decoders, so that's what we use.
+ return StartsWithASCII(codecs_info[i].name, "OMX.google.", true);
+ }
+ }
+ return true;
+}
+
+MediaCodecBridge::MediaCodecBridge(const std::string& mime,
+ bool is_secure,
+ MediaCodecDirection direction) {
JNIEnv* env = AttachCurrentThread();
CHECK(env);
DCHECK(!mime.empty());
ScopedJavaLocalRef<jstring> j_mime = ConvertUTF8ToJavaString(env, mime);
j_media_codec_.Reset(
- Java_MediaCodecBridge_create(env, j_mime.obj(), is_secure));
+ Java_MediaCodecBridge_create(env, j_mime.obj(), is_secure, direction));
}
MediaCodecBridge::~MediaCodecBridge() {
@@ -180,23 +220,42 @@ void MediaCodecBridge::GetOutputFormat(int* width, int* height) {
}
MediaCodecStatus MediaCodecBridge::QueueInputBuffer(
- int index, const uint8* data, int data_size,
+ int index,
+ const uint8* data,
+ size_t data_size,
const base::TimeDelta& presentation_time) {
- int size_to_copy = FillInputBuffer(index, data, data_size);
- DCHECK_EQ(size_to_copy, data_size);
+ DVLOG(3) << __PRETTY_FUNCTION__ << index << ": " << data_size;
+ if (data_size > base::checked_numeric_cast<size_t>(kint32max))
+ return MEDIA_CODEC_ERROR;
+ if (data && !FillInputBuffer(index, data, data_size))
+ return MEDIA_CODEC_ERROR;
JNIEnv* env = AttachCurrentThread();
- return static_cast<MediaCodecStatus>(Java_MediaCodecBridge_queueInputBuffer(
- env, j_media_codec_.obj(),
- index, 0, size_to_copy, presentation_time.InMicroseconds(), 0));
+ return static_cast<MediaCodecStatus>(
+ Java_MediaCodecBridge_queueInputBuffer(env,
+ j_media_codec_.obj(),
+ index,
+ 0,
+ data_size,
+ presentation_time.InMicroseconds(),
+ 0));
}
MediaCodecStatus MediaCodecBridge::QueueSecureInputBuffer(
- int index, const uint8* data, int data_size, const uint8* key_id,
- int key_id_size, const uint8* iv, int iv_size,
- const SubsampleEntry* subsamples, int subsamples_size,
+ int index,
+ const uint8* data,
+ size_t data_size,
+ const uint8* key_id,
+ int key_id_size,
+ const uint8* iv,
+ int iv_size,
+ const SubsampleEntry* subsamples,
+ int subsamples_size,
const base::TimeDelta& presentation_time) {
- int size_to_copy = FillInputBuffer(index, data, data_size);
- DCHECK_EQ(size_to_copy, data_size);
+ DVLOG(3) << __PRETTY_FUNCTION__ << index << ": " << data_size;
+ if (data_size > base::checked_numeric_cast<size_t>(kint32max))
+ return MEDIA_CODEC_ERROR;
+ if (data && !FillInputBuffer(index, data, data_size))
+ return MEDIA_CODEC_ERROR;
JNIEnv* env = AttachCurrentThread();
ScopedJavaLocalRef<jbyteArray> j_key_id =
@@ -233,55 +292,87 @@ MediaCodecStatus MediaCodecBridge::QueueSecureInputBuffer(
}
ScopedJavaLocalRef<jintArray> clear_array =
- ToJavaIntArray(env, native_clear_array.Pass(), new_subsamples_size);
+ ToJavaIntArray(env, native_clear_array.Pass(), new_subsamples_size);
ScopedJavaLocalRef<jintArray> cypher_array =
- ToJavaIntArray(env, native_cypher_array.Pass(), new_subsamples_size);
+ ToJavaIntArray(env, native_cypher_array.Pass(), new_subsamples_size);
return static_cast<MediaCodecStatus>(
Java_MediaCodecBridge_queueSecureInputBuffer(
- env, j_media_codec_.obj(), index, 0, j_iv.obj(), j_key_id.obj(),
- clear_array.obj(), cypher_array.obj(), new_subsamples_size,
+ env,
+ j_media_codec_.obj(),
+ index,
+ 0,
+ j_iv.obj(),
+ j_key_id.obj(),
+ clear_array.obj(),
+ cypher_array.obj(),
+ new_subsamples_size,
presentation_time.InMicroseconds()));
}
void MediaCodecBridge::QueueEOS(int input_buffer_index) {
+ DVLOG(3) << __PRETTY_FUNCTION__ << ": " << input_buffer_index;
JNIEnv* env = AttachCurrentThread();
- Java_MediaCodecBridge_queueInputBuffer(
- env, j_media_codec_.obj(),
- input_buffer_index, 0, 0, 0, kBufferFlagEndOfStream);
+ Java_MediaCodecBridge_queueInputBuffer(env,
+ j_media_codec_.obj(),
+ input_buffer_index,
+ 0,
+ 0,
+ 0,
+ kBufferFlagEndOfStream);
}
MediaCodecStatus MediaCodecBridge::DequeueInputBuffer(
- const base::TimeDelta& timeout, int* index) {
+ const base::TimeDelta& timeout,
+ int* index) {
JNIEnv* env = AttachCurrentThread();
ScopedJavaLocalRef<jobject> result = Java_MediaCodecBridge_dequeueInputBuffer(
env, j_media_codec_.obj(), timeout.InMicroseconds());
*index = Java_DequeueInputResult_index(env, result.obj());
- return static_cast<MediaCodecStatus>(
+ MediaCodecStatus status = static_cast<MediaCodecStatus>(
Java_DequeueInputResult_status(env, result.obj()));
+ DVLOG(3) << __PRETTY_FUNCTION__ << ": status: " << status
+ << ", index: " << *index;
+ return status;
}
MediaCodecStatus MediaCodecBridge::DequeueOutputBuffer(
- const base::TimeDelta& timeout, int* index, size_t* offset, size_t* size,
- base::TimeDelta* presentation_time, bool* end_of_stream) {
+ const base::TimeDelta& timeout,
+ int* index,
+ size_t* offset,
+ size_t* size,
+ base::TimeDelta* presentation_time,
+ bool* end_of_stream,
+ bool* key_frame) {
JNIEnv* env = AttachCurrentThread();
ScopedJavaLocalRef<jobject> result =
- Java_MediaCodecBridge_dequeueOutputBuffer(env, j_media_codec_.obj(),
- timeout.InMicroseconds());
- *index = Java_DequeueOutputResult_index(env, result.obj());;
+ Java_MediaCodecBridge_dequeueOutputBuffer(
+ env, j_media_codec_.obj(), timeout.InMicroseconds());
+ *index = Java_DequeueOutputResult_index(env, result.obj());
*offset = base::checked_numeric_cast<size_t>(
- Java_DequeueOutputResult_offset(env, result.obj()));
+ Java_DequeueOutputResult_offset(env, result.obj()));
*size = base::checked_numeric_cast<size_t>(
- Java_DequeueOutputResult_numBytes(env, result.obj()));
- *presentation_time = base::TimeDelta::FromMicroseconds(
- Java_DequeueOutputResult_presentationTimeMicroseconds(env, result.obj()));
+ Java_DequeueOutputResult_numBytes(env, result.obj()));
+ if (presentation_time) {
+ *presentation_time = base::TimeDelta::FromMicroseconds(
+ Java_DequeueOutputResult_presentationTimeMicroseconds(env,
+ result.obj()));
+ }
int flags = Java_DequeueOutputResult_flags(env, result.obj());
- *end_of_stream = flags & kBufferFlagEndOfStream;
- return static_cast<MediaCodecStatus>(
+ if (end_of_stream)
+ *end_of_stream = flags & kBufferFlagEndOfStream;
+ if (key_frame)
+ *key_frame = flags & kBufferFlagSyncFrame;
+ MediaCodecStatus status = static_cast<MediaCodecStatus>(
Java_DequeueOutputResult_status(env, result.obj()));
+ DVLOG(3) << __PRETTY_FUNCTION__ << ": status: " << status
+ << ", index: " << *index << ", offset: " << *offset
+ << ", size: " << *size << ", flags: " << flags;
+ return status;
}
void MediaCodecBridge::ReleaseOutputBuffer(int index, bool render) {
+ DVLOG(3) << __PRETTY_FUNCTION__ << ": " << index;
JNIEnv* env = AttachCurrentThread();
CHECK(env);
@@ -289,41 +380,85 @@ void MediaCodecBridge::ReleaseOutputBuffer(int index, bool render) {
env, j_media_codec_.obj(), index, render);
}
+int MediaCodecBridge::GetInputBuffersCount() {
+ JNIEnv* env = AttachCurrentThread();
+ return Java_MediaCodecBridge_getInputBuffersCount(env, j_media_codec_.obj());
+}
+
+int MediaCodecBridge::GetOutputBuffersCount() {
+ JNIEnv* env = AttachCurrentThread();
+ return Java_MediaCodecBridge_getOutputBuffersCount(env, j_media_codec_.obj());
+}
+
+size_t MediaCodecBridge::GetOutputBuffersCapacity() {
+ JNIEnv* env = AttachCurrentThread();
+ return Java_MediaCodecBridge_getOutputBuffersCapacity(env,
+ j_media_codec_.obj());
+}
+
bool MediaCodecBridge::GetOutputBuffers() {
JNIEnv* env = AttachCurrentThread();
return Java_MediaCodecBridge_getOutputBuffers(env, j_media_codec_.obj());
}
-size_t MediaCodecBridge::FillInputBuffer(
- int index, const uint8* data, int size) {
+void MediaCodecBridge::GetInputBuffer(int input_buffer_index,
+ uint8** data,
+ size_t* capacity) {
JNIEnv* env = AttachCurrentThread();
+ ScopedJavaLocalRef<jobject> j_buffer(Java_MediaCodecBridge_getInputBuffer(
+ env, j_media_codec_.obj(), input_buffer_index));
+ *data = static_cast<uint8*>(env->GetDirectBufferAddress(j_buffer.obj()));
+ *capacity = base::checked_numeric_cast<size_t>(
+ env->GetDirectBufferCapacity(j_buffer.obj()));
+}
+bool MediaCodecBridge::CopyFromOutputBuffer(int index,
+ size_t offset,
+ void* dst,
+ int dst_size) {
+ JNIEnv* env = AttachCurrentThread();
ScopedJavaLocalRef<jobject> j_buffer(
- Java_MediaCodecBridge_getInputBuffer(env, j_media_codec_.obj(), index));
+ Java_MediaCodecBridge_getOutputBuffer(env, j_media_codec_.obj(), index));
+ void* src_data =
+ reinterpret_cast<uint8*>(env->GetDirectBufferAddress(j_buffer.obj())) +
+ offset;
+ int src_capacity = env->GetDirectBufferCapacity(j_buffer.obj()) - offset;
+ if (src_capacity < dst_size)
+ return false;
+ memcpy(dst, src_data, dst_size);
+ return true;
+}
- uint8* direct_buffer =
- static_cast<uint8*>(env->GetDirectBufferAddress(j_buffer.obj()));
- int64 buffer_capacity = env->GetDirectBufferCapacity(j_buffer.obj());
+bool MediaCodecBridge::FillInputBuffer(int index,
+ const uint8* data,
+ size_t size) {
+ uint8* dst = NULL;
+ size_t capacity = 0;
+ GetInputBuffer(index, &dst, &capacity);
+ CHECK(dst);
- int size_to_copy = (buffer_capacity < size) ? buffer_capacity : size;
- // TODO(qinmin): Handling the case that not all the data can be copied.
- DCHECK(size_to_copy == size) <<
- "Failed to fill all the data into the input buffer. Size to fill: "
- << size << ". Size filled: " << size_to_copy;
- if (size_to_copy > 0)
- memcpy(direct_buffer, data, size_to_copy);
- return size_to_copy;
-}
+ if (size > capacity) {
+ LOG(ERROR) << "Input buffer size " << size
+ << " exceeds MediaCodec input buffer capacity: " << capacity;
+ return false;
+ }
-AudioCodecBridge::AudioCodecBridge(const std::string& mime)
- // Audio codec doesn't care about security level.
- : MediaCodecBridge(mime, false) {
+ memcpy(dst, data, size);
+ return true;
}
-bool AudioCodecBridge::Start(
- const AudioCodec codec, int sample_rate, int channel_count,
- const uint8* extra_data, size_t extra_data_size, bool play_audio,
- jobject media_crypto) {
+AudioCodecBridge::AudioCodecBridge(const std::string& mime)
+ // Audio codec doesn't care about security level and there is no need for
+ // audio encoding yet.
+ : MediaCodecBridge(mime, false, MEDIA_CODEC_DECODER) {}
+
+bool AudioCodecBridge::Start(const AudioCodec& codec,
+ int sample_rate,
+ int channel_count,
+ const uint8* extra_data,
+ size_t extra_data_size,
+ bool play_audio,
+ jobject media_crypto) {
JNIEnv* env = AttachCurrentThread();
if (!media_codec())
@@ -335,32 +470,31 @@ bool AudioCodecBridge::Start(
ScopedJavaLocalRef<jstring> j_mime =
ConvertUTF8ToJavaString(env, codec_string);
- ScopedJavaLocalRef<jobject> j_format(
- Java_MediaCodecBridge_createAudioFormat(
- env, j_mime.obj(), sample_rate, channel_count));
+ ScopedJavaLocalRef<jobject> j_format(Java_MediaCodecBridge_createAudioFormat(
+ env, j_mime.obj(), sample_rate, channel_count));
DCHECK(!j_format.is_null());
if (!ConfigureMediaFormat(j_format.obj(), codec, extra_data, extra_data_size))
return false;
if (!Java_MediaCodecBridge_configureAudio(
- env, media_codec(), j_format.obj(), media_crypto, 0, play_audio)) {
+ env, media_codec(), j_format.obj(), media_crypto, 0, play_audio)) {
return false;
}
return StartInternal();
}
-bool AudioCodecBridge::ConfigureMediaFormat(
- jobject j_format, const AudioCodec codec, const uint8* extra_data,
- size_t extra_data_size) {
+bool AudioCodecBridge::ConfigureMediaFormat(jobject j_format,
+ const AudioCodec& codec,
+ const uint8* extra_data,
+ size_t extra_data_size) {
if (extra_data_size == 0)
return true;
JNIEnv* env = AttachCurrentThread();
switch (codec) {
- case kCodecVorbis:
- {
+ case kCodecVorbis: {
if (extra_data[0] != 2) {
LOG(ERROR) << "Invalid number of vorbis headers before the codec "
<< "header: " << extra_data[0];
@@ -405,8 +539,7 @@ bool AudioCodecBridge::ConfigureMediaFormat(
env, j_format, 1, last_header.obj());
break;
}
- case kCodecAAC:
- {
+ case kCodecAAC: {
media::BitReader reader(extra_data, extra_data_size);
// The following code is copied from aac.cc
@@ -465,8 +598,7 @@ void AudioCodecBridge::PlayOutputBuffer(int index, size_t size) {
ScopedJavaLocalRef<jbyteArray> byte_array =
base::android::ToJavaByteArray(env, buffer, numBytes);
- Java_MediaCodecBridge_playOutputBuffer(
- env, media_codec(), byte_array.obj());
+ Java_MediaCodecBridge_playOutputBuffer(env, media_codec(), byte_array.obj());
}
void AudioCodecBridge::SetVolume(double volume) {
@@ -474,45 +606,108 @@ void AudioCodecBridge::SetVolume(double volume) {
Java_MediaCodecBridge_setVolume(env, media_codec(), volume);
}
-VideoCodecBridge::VideoCodecBridge(const std::string& mime, bool is_secure)
- : MediaCodecBridge(mime, is_secure) {
+AudioCodecBridge* AudioCodecBridge::Create(const AudioCodec& codec) {
+ const std::string mime = AudioCodecToAndroidMimeType(codec);
+ return mime.empty() ? NULL : new AudioCodecBridge(mime);
+}
+
+// static
+bool AudioCodecBridge::IsKnownUnaccelerated(const AudioCodec& codec) {
+ return MediaCodecBridge::IsKnownUnaccelerated(
+ AudioCodecToAndroidMimeType(codec), MEDIA_CODEC_DECODER);
}
-bool VideoCodecBridge::Start(
- const VideoCodec codec, const gfx::Size& size, jobject surface,
- jobject media_crypto) {
- JNIEnv* env = AttachCurrentThread();
+// static
+bool VideoCodecBridge::IsKnownUnaccelerated(const VideoCodec& codec,
+ MediaCodecDirection direction) {
+ return MediaCodecBridge::IsKnownUnaccelerated(
+ VideoCodecToAndroidMimeType(codec), direction);
+}
- if (!media_codec())
- return false;
+VideoCodecBridge* VideoCodecBridge::CreateDecoder(const VideoCodec& codec,
+ bool is_secure,
+ const gfx::Size& size,
+ jobject surface,
+ jobject media_crypto) {
+ JNIEnv* env = AttachCurrentThread();
+ const std::string mime = VideoCodecToAndroidMimeType(codec);
+ if (mime.empty())
+ return NULL;
- std::string codec_string = VideoCodecToAndroidMimeType(codec);
- if (codec_string.empty())
- return false;
+ scoped_ptr<VideoCodecBridge> bridge(
+ new VideoCodecBridge(mime, is_secure, MEDIA_CODEC_DECODER));
+ if (!bridge->media_codec())
+ return NULL;
- ScopedJavaLocalRef<jstring> j_mime =
- ConvertUTF8ToJavaString(env, codec_string);
+ ScopedJavaLocalRef<jstring> j_mime = ConvertUTF8ToJavaString(env, mime);
ScopedJavaLocalRef<jobject> j_format(
- Java_MediaCodecBridge_createVideoFormat(
+ Java_MediaCodecBridge_createVideoDecoderFormat(
env, j_mime.obj(), size.width(), size.height()));
DCHECK(!j_format.is_null());
- if (!Java_MediaCodecBridge_configureVideo(
- env, media_codec(), j_format.obj(), surface, media_crypto, 0)) {
- return false;
+ if (!Java_MediaCodecBridge_configureVideo(env,
+ bridge->media_codec(),
+ j_format.obj(),
+ surface,
+ media_crypto,
+ 0)) {
+ return NULL;
}
- return StartInternal();
+ return bridge->StartInternal() ? bridge.release() : NULL;
}
-AudioCodecBridge* AudioCodecBridge::Create(const AudioCodec codec) {
- const std::string mime = AudioCodecToAndroidMimeType(codec);
- return mime.empty() ? NULL : new AudioCodecBridge(mime);
+VideoCodecBridge* VideoCodecBridge::CreateEncoder(const VideoCodec& codec,
+ const gfx::Size& size,
+ int bit_rate,
+ int frame_rate,
+ int i_frame_interval,
+ int color_format) {
+ JNIEnv* env = AttachCurrentThread();
+ const std::string mime = VideoCodecToAndroidMimeType(codec);
+ if (mime.empty())
+ return NULL;
+
+ scoped_ptr<VideoCodecBridge> bridge(
+ new VideoCodecBridge(mime, false, MEDIA_CODEC_ENCODER));
+ if (!bridge->media_codec())
+ return NULL;
+
+ ScopedJavaLocalRef<jstring> j_mime = ConvertUTF8ToJavaString(env, mime);
+ ScopedJavaLocalRef<jobject> j_format(
+ Java_MediaCodecBridge_createVideoEncoderFormat(env,
+ j_mime.obj(),
+ size.width(),
+ size.height(),
+ bit_rate,
+ frame_rate,
+ i_frame_interval,
+ color_format));
+ DCHECK(!j_format.is_null());
+ if (!Java_MediaCodecBridge_configureVideo(env,
+ bridge->media_codec(),
+ j_format.obj(),
+ NULL,
+ NULL,
+ kConfigureFlagEncode)) {
+ return NULL;
+ }
+
+ return bridge->StartInternal() ? bridge.release() : NULL;
}
-VideoCodecBridge* VideoCodecBridge::Create(const VideoCodec codec,
- bool is_secure) {
- const std::string mime = VideoCodecToAndroidMimeType(codec);
- return mime.empty() ? NULL : new VideoCodecBridge(mime, is_secure);
+VideoCodecBridge::VideoCodecBridge(const std::string& mime,
+ bool is_secure,
+ MediaCodecDirection direction)
+ : MediaCodecBridge(mime, is_secure, direction) {}
+
+void VideoCodecBridge::SetVideoBitrate(int bps) {
+ JNIEnv* env = AttachCurrentThread();
+ Java_MediaCodecBridge_setVideoBitrate(env, media_codec(), bps);
+}
+
+void VideoCodecBridge::RequestKeyFrameSoon() {
+ JNIEnv* env = AttachCurrentThread();
+ Java_MediaCodecBridge_requestKeyFrameSoon(env, media_codec());
}
bool MediaCodecBridge::RegisterMediaCodecBridge(JNIEnv* env) {
diff --git a/chromium/media/base/android/media_codec_bridge.h b/chromium/media/base/android/media_codec_bridge.h
index df30472e69e..e71f67f918e 100644
--- a/chromium/media/base/android/media_codec_bridge.h
+++ b/chromium/media/base/android/media_codec_bridge.h
@@ -33,32 +33,44 @@ enum MediaCodecStatus {
MEDIA_CODEC_ERROR
};
+// Codec direction. Keep this in sync with MediaCodecBridge.java.
+enum MediaCodecDirection {
+ MEDIA_CODEC_DECODER,
+ MEDIA_CODEC_ENCODER,
+};
+
// This class serves as a bridge for native code to call java functions inside
// Android MediaCodec class. For more information on Android MediaCodec, check
// http://developer.android.com/reference/android/media/MediaCodec.html
// Note: MediaCodec is only available on JB and greater.
// Use AudioCodecBridge or VideoCodecBridge to create an instance of this
// object.
+//
+// TODO(fischman,xhwang): replace this (and the enums that go with it) with
+// chromium's JNI auto-generation hotness.
class MEDIA_EXPORT MediaCodecBridge {
public:
// Returns true if MediaCodec is available on the device.
static bool IsAvailable();
+ // Returns true if MediaCodec.setParameters() is available on the device.
+ static bool SupportsSetParameters();
+
// Returns whether MediaCodecBridge has a decoder that |is_secure| and can
// decode |codec| type.
static bool CanDecode(const std::string& codec, bool is_secure);
- // Represents supported codecs on android. |secure_decoder_supported| is true
- // if secure decoder is available for the codec type.
+ // Represents supported codecs on android.
// TODO(qinmin): Curretly the codecs string only contains one codec, do we
// need more specific codecs separated by comma. (e.g. "vp8" -> "vp8, vp8.0")
struct CodecsInfo {
- std::string codecs;
- bool secure_decoder_supported;
+ std::string codecs; // E.g. "vp8" or "avc1".
+ std::string name; // E.g. "OMX.google.vp8.decoder".
+ MediaCodecDirection direction;
};
// Get a list of supported codecs.
- static void GetCodecsInfo(std::vector<CodecsInfo>* codecs_info);
+ static std::vector<CodecsInfo> GetCodecsInfo();
virtual ~MediaCodecBridge();
@@ -81,21 +93,32 @@ class MEDIA_EXPORT MediaCodecBridge {
// returns a format change by returning INFO_OUTPUT_FORMAT_CHANGED
void GetOutputFormat(int* width, int* height);
+ // Returns the number of input buffers used by the codec.
+ int GetInputBuffersCount();
+
// Submits a byte array to the given input buffer. Call this after getting an
- // available buffer from DequeueInputBuffer().
+ // available buffer from DequeueInputBuffer(). If |data| is NULL, assume the
+ // input buffer has already been populated (but still obey |size|).
+ // |data_size| must be less than kint32max (because Java).
MediaCodecStatus QueueInputBuffer(int index,
const uint8* data,
- int size,
+ size_t data_size,
const base::TimeDelta& presentation_time);
- // Similar to the above call, but submits a buffer that is encrypted.
- // Note: NULL |subsamples| indicates the whole buffer is encrypted.
+ // Similar to the above call, but submits a buffer that is encrypted. Note:
+ // NULL |subsamples| indicates the whole buffer is encrypted. If |data| is
+ // NULL, assume the input buffer has already been populated (but still obey
+ // |data_size|). |data_size| must be less than kint32max (because Java).
MediaCodecStatus QueueSecureInputBuffer(
int index,
- const uint8* data, int data_size,
- const uint8* key_id, int key_id_size,
- const uint8* iv, int iv_size,
- const SubsampleEntry* subsamples, int subsamples_size,
+ const uint8* data,
+ size_t data_size,
+ const uint8* key_id,
+ int key_id_size,
+ const uint8* iv,
+ int iv_size,
+ const SubsampleEntry* subsamples,
+ int subsamples_size,
const base::TimeDelta& presentation_time);
// Submits an empty buffer with a EOS (END OF STREAM) flag.
@@ -113,7 +136,8 @@ class MEDIA_EXPORT MediaCodecBridge {
// Dequeues an output buffer, block at most timeout_us microseconds.
// Returns the status of this operation. If OK is returned, the output
// parameters should be populated. Otherwise, the values of output parameters
- // should not be used.
+ // should not be used. Output parameters other than index/offset/size are
+ // optional and only set if not NULL.
// Note: Never use infinite timeout as this would block the decoder thread and
// prevent the decoder job from being released.
// TODO(xhwang): Can we drop |end_of_stream| and return
@@ -123,21 +147,42 @@ class MEDIA_EXPORT MediaCodecBridge {
size_t* offset,
size_t* size,
base::TimeDelta* presentation_time,
- bool* end_of_stream);
+ bool* end_of_stream,
+ bool* key_frame);
- // Returns the buffer to the codec. If you previously specified a surface
- // when configuring this video decoder you can optionally render the buffer.
+ // Returns the buffer to the codec. If you previously specified a surface when
+ // configuring this video decoder you can optionally render the buffer.
void ReleaseOutputBuffer(int index, bool render);
+ // Returns the number of output buffers used by the codec.
+ int GetOutputBuffersCount();
+
+ // Returns the capacity of each output buffer used by the codec.
+ size_t GetOutputBuffersCapacity();
+
// Gets output buffers from media codec and keeps them inside the java class.
// To access them, use DequeueOutputBuffer(). Returns whether output buffers
// were successfully obtained.
bool GetOutputBuffers() WARN_UNUSED_RESULT;
+ // Returns an input buffer's base pointer and capacity.
+ void GetInputBuffer(int input_buffer_index, uint8** data, size_t* capacity);
+
+ // Copy |dst_size| bytes from output buffer |index|'s |offset| onwards into
+ // |*dst|.
+ bool CopyFromOutputBuffer(int index, size_t offset, void* dst, int dst_size);
+
static bool RegisterMediaCodecBridge(JNIEnv* env);
protected:
- MediaCodecBridge(const std::string& mime, bool is_secure);
+ // Returns true if |mime_type| is known to be unaccelerated (i.e. backed by a
+ // software codec instead of a hardware one).
+ static bool IsKnownUnaccelerated(const std::string& mime_type,
+ MediaCodecDirection direction);
+
+ MediaCodecBridge(const std::string& mime,
+ bool is_secure,
+ MediaCodecDirection direction);
// Calls start() against the media codec instance. Used in StartXXX() after
// configuring media codec. Returns whether media codec was successfully
@@ -145,10 +190,14 @@ class MEDIA_EXPORT MediaCodecBridge {
bool StartInternal() WARN_UNUSED_RESULT;
jobject media_codec() { return j_media_codec_.obj(); }
+ MediaCodecDirection direction_;
private:
- // Fills a particular input buffer and returns the size of copied data.
- size_t FillInputBuffer(int index, const uint8* data, int data_size);
+ // Fills a particular input buffer; returns false if |data_size| exceeds the
+ // input buffer's capacity (and doesn't touch the input buffer in that case).
+ bool FillInputBuffer(int index,
+ const uint8* data,
+ size_t data_size) WARN_UNUSED_RESULT;
// Java MediaCodec instance.
base::android::ScopedJavaGlobalRef<jobject> j_media_codec_;
@@ -160,10 +209,13 @@ class AudioCodecBridge : public MediaCodecBridge {
public:
// Returns an AudioCodecBridge instance if |codec| is supported, or a NULL
// pointer otherwise.
- static AudioCodecBridge* Create(const AudioCodec codec);
+ static AudioCodecBridge* Create(const AudioCodec& codec);
+
+ // See MediaCodecBridge::IsKnownUnaccelerated().
+ static bool IsKnownUnaccelerated(const AudioCodec& codec);
// Start the audio codec bridge.
- bool Start(const AudioCodec codec, int sample_rate, int channel_count,
+ bool Start(const AudioCodec& codec, int sample_rate, int channel_count,
const uint8* extra_data, size_t extra_data_size,
bool play_audio, jobject media_crypto) WARN_UNUSED_RESULT;
@@ -178,26 +230,42 @@ class AudioCodecBridge : public MediaCodecBridge {
explicit AudioCodecBridge(const std::string& mime);
// Configure the java MediaFormat object with the extra codec data passed in.
- bool ConfigureMediaFormat(jobject j_format, const AudioCodec codec,
+ bool ConfigureMediaFormat(jobject j_format, const AudioCodec& codec,
const uint8* extra_data, size_t extra_data_size);
};
class MEDIA_EXPORT VideoCodecBridge : public MediaCodecBridge {
public:
- // Returns an VideoCodecBridge instance if |codec| is supported, or a NULL
- // pointer otherwise.
- static VideoCodecBridge* Create(const VideoCodec codec, bool is_secure);
-
- // Start the video codec bridge.
- // TODO(qinmin): Pass codec specific data if available.
- bool Start(const VideoCodec codec, const gfx::Size& size, jobject surface,
- jobject media_crypto);
+ // See MediaCodecBridge::IsKnownUnaccelerated().
+ static bool IsKnownUnaccelerated(const VideoCodec& codec,
+ MediaCodecDirection direction);
+
+ // Create, start, and return a VideoCodecBridge decoder or NULL on failure.
+ static VideoCodecBridge* CreateDecoder(
+ const VideoCodec& codec, // e.g. media::kCodecVP8
+ bool is_secure,
+ const gfx::Size& size, // Output frame size.
+ jobject surface, // Output surface, optional.
+ jobject media_crypto); // MediaCrypto object, optional.
+
+ // Create, start, and return a VideoCodecBridge encoder or NULL on failure.
+ static VideoCodecBridge* CreateEncoder(
+ const VideoCodec& codec, // e.g. media::kCodecVP8
+ const gfx::Size& size, // input frame size
+ int bit_rate, // bits/second
+ int frame_rate, // frames/second
+ int i_frame_interval, // count
+ int color_format); // MediaCodecInfo.CodecCapabilities.
+
+ void SetVideoBitrate(int bps);
+ void RequestKeyFrameSoon();
private:
- VideoCodecBridge(const std::string& mime, bool is_secure);
+ VideoCodecBridge(const std::string& mime,
+ bool is_secure,
+ MediaCodecDirection direction);
};
} // namespace media
#endif // MEDIA_BASE_ANDROID_MEDIA_CODEC_BRIDGE_H_
-
diff --git a/chromium/media/base/android/media_codec_bridge_unittest.cc b/chromium/media/base/android/media_codec_bridge_unittest.cc
index 1e24b5f28b5..c72e6a1a20c 100644
--- a/chromium/media/base/android/media_codec_bridge_unittest.cc
+++ b/chromium/media/base/android/media_codec_bridge_unittest.cc
@@ -93,6 +93,15 @@ unsigned char test_mp3[] = {
namespace media {
+// Helper macro to skip the test if MediaCodecBridge isn't available.
+#define SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE() \
+ do { \
+ if (!MediaCodecBridge::IsAvailable()) { \
+ VLOG(0) << "Could not run test - not supported on device."; \
+ return; \
+ } \
+ } while (0)
+
static const int kPresentationTimeBase = 100;
static inline const base::TimeDelta InfiniteTimeOut() {
@@ -120,10 +129,16 @@ void DecodeMediaFrame(
bool eos = false;
int output_buf_index = -1;
status = media_codec->DequeueOutputBuffer(InfiniteTimeOut(),
- &output_buf_index, &unused_offset, &size, &new_timestamp, &eos);
-
- if (status == MEDIA_CODEC_OK && output_buf_index > 0)
+ &output_buf_index,
+ &unused_offset,
+ &size,
+ &new_timestamp,
+ &eos,
+ NULL);
+
+ if (status == MEDIA_CODEC_OK && output_buf_index > 0) {
media_codec->ReleaseOutputBuffer(output_buf_index, false);
+ }
// Output time stamp should not be smaller than old timestamp.
ASSERT_TRUE(new_timestamp >= timestamp);
input_pts += base::TimeDelta::FromMicroseconds(33000);
@@ -132,16 +147,15 @@ void DecodeMediaFrame(
}
TEST(MediaCodecBridgeTest, Initialize) {
- if (!MediaCodecBridge::IsAvailable())
- return;
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
scoped_ptr<media::MediaCodecBridge> media_codec;
- media_codec.reset(VideoCodecBridge::Create(kCodecH264, false));
+ media_codec.reset(VideoCodecBridge::CreateDecoder(
+ kCodecH264, false, gfx::Size(640, 480), NULL, NULL));
}
TEST(MediaCodecBridgeTest, DoNormal) {
- if (!MediaCodecBridge::IsAvailable())
- return;
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
scoped_ptr<media::AudioCodecBridge> media_codec;
media_codec.reset(AudioCodecBridge::Create(kCodecMP3));
@@ -155,9 +169,10 @@ TEST(MediaCodecBridgeTest, DoNormal) {
ASSERT_GE(input_buf_index, 0);
int64 input_pts = kPresentationTimeBase;
- media_codec->QueueInputBuffer(
- input_buf_index, test_mp3, sizeof(test_mp3),
- base::TimeDelta::FromMicroseconds(++input_pts));
+ media_codec->QueueInputBuffer(input_buf_index,
+ test_mp3,
+ sizeof(test_mp3),
+ base::TimeDelta::FromMicroseconds(++input_pts));
status = media_codec->DequeueInputBuffer(InfiniteTimeOut(), &input_buf_index);
media_codec->QueueInputBuffer(
@@ -175,7 +190,12 @@ TEST(MediaCodecBridgeTest, DoNormal) {
base::TimeDelta timestamp;
int output_buf_index = -1;
status = media_codec->DequeueOutputBuffer(InfiniteTimeOut(),
- &output_buf_index, &unused_offset, &size, &timestamp, &eos);
+ &output_buf_index,
+ &unused_offset,
+ &size,
+ &timestamp,
+ &eos,
+ NULL);
switch (status) {
case MEDIA_CODEC_DEQUEUE_OUTPUT_AGAIN_LATER:
FAIL();
@@ -201,8 +221,7 @@ TEST(MediaCodecBridgeTest, DoNormal) {
}
TEST(MediaCodecBridgeTest, InvalidVorbisHeader) {
- if (!MediaCodecBridge::IsAvailable())
- return;
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
scoped_ptr<media::AudioCodecBridge> media_codec;
media_codec.reset(AudioCodecBridge::Create(kCodecVorbis));
@@ -231,18 +250,18 @@ TEST(MediaCodecBridgeTest, InvalidVorbisHeader) {
}
TEST(MediaCodecBridgeTest, PresentationTimestampsDoNotDecrease) {
- if (!MediaCodecBridge::IsAvailable())
- return;
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
- scoped_ptr<VideoCodecBridge> media_codec;
- media_codec.reset(VideoCodecBridge::Create(kCodecVP8, false));
- EXPECT_TRUE(media_codec->Start(
- kCodecVP8, gfx::Size(320, 240), NULL, NULL));
+ scoped_ptr<VideoCodecBridge> media_codec(VideoCodecBridge::CreateDecoder(
+ kCodecVP8, false, gfx::Size(320, 240), NULL, NULL));
+ EXPECT_TRUE(media_codec.get());
scoped_refptr<DecoderBuffer> buffer =
ReadTestDataFile("vp8-I-frame-320x240");
- DecodeMediaFrame(
- media_codec.get(), buffer->data(), buffer->data_size(),
- base::TimeDelta(), base::TimeDelta());
+ DecodeMediaFrame(media_codec.get(),
+ buffer->data(),
+ buffer->data_size(),
+ base::TimeDelta(),
+ base::TimeDelta());
// Simulate a seek to 10 seconds, and each chunk has 2 I-frames.
std::vector<uint8> chunk(buffer->data(),
@@ -250,20 +269,27 @@ TEST(MediaCodecBridgeTest, PresentationTimestampsDoNotDecrease) {
chunk.insert(chunk.end(), buffer->data(),
buffer->data() + buffer->data_size());
media_codec->Reset();
- DecodeMediaFrame(media_codec.get(), &chunk[0], chunk.size(),
+ DecodeMediaFrame(media_codec.get(),
+ &chunk[0],
+ chunk.size(),
base::TimeDelta::FromMicroseconds(10000000),
base::TimeDelta::FromMicroseconds(9900000));
// Simulate a seek to 5 seconds.
media_codec->Reset();
- DecodeMediaFrame(media_codec.get(), &chunk[0], chunk.size(),
+ DecodeMediaFrame(media_codec.get(),
+ &chunk[0],
+ chunk.size(),
base::TimeDelta::FromMicroseconds(5000000),
base::TimeDelta::FromMicroseconds(4900000));
}
TEST(MediaCodecBridgeTest, CreateUnsupportedCodec) {
EXPECT_EQ(NULL, AudioCodecBridge::Create(kUnknownAudioCodec));
- EXPECT_EQ(NULL, VideoCodecBridge::Create(kUnknownVideoCodec, false));
+ EXPECT_EQ(
+ NULL,
+ VideoCodecBridge::CreateDecoder(
+ kUnknownVideoCodec, false, gfx::Size(320, 240), NULL, NULL));
}
} // namespace media
diff --git a/chromium/media/base/android/media_decoder_job.cc b/chromium/media/base/android/media_decoder_job.cc
index 65e9a10b432..c6ad9bbe6f7 100644
--- a/chromium/media/base/android/media_decoder_job.cc
+++ b/chromium/media/base/android/media_decoder_job.cc
@@ -6,9 +6,11 @@
#include "base/bind.h"
#include "base/callback_helpers.h"
+#include "base/debug/trace_event.h"
#include "base/message_loop/message_loop.h"
#include "media/base/android/media_codec_bridge.h"
#include "media/base/bind_to_loop.h"
+#include "media/base/buffers.h"
namespace media {
@@ -26,6 +28,9 @@ MediaDecoderJob::MediaDecoderJob(
media_codec_bridge_(media_codec_bridge),
needs_flush_(false),
input_eos_encountered_(false),
+ output_eos_encountered_(false),
+ skip_eos_enqueue_(true),
+ prerolling_(true),
weak_this_(this),
request_data_cb_(request_data_cb),
access_unit_index_(0),
@@ -41,6 +46,11 @@ void MediaDecoderJob::OnDataReceived(const DemuxerData& data) {
DCHECK(ui_loop_->BelongsToCurrentThread());
DCHECK(!on_data_received_cb_.is_null());
+ TRACE_EVENT_ASYNC_END2(
+ "media", "MediaDecoderJob::RequestData", this,
+ "Data type", data.type == media::DemuxerStream::AUDIO ? "AUDIO" : "VIDEO",
+ "Units read", data.access_units.size());
+
base::Closure done_cb = base::ResetAndReturn(&on_data_received_cb_);
if (stop_decode_pending_) {
@@ -59,17 +69,19 @@ void MediaDecoderJob::Prefetch(const base::Closure& prefetch_cb) {
DCHECK(decode_cb_.is_null());
if (HasData()) {
+ DVLOG(1) << __FUNCTION__ << " : using previously received data";
ui_loop_->PostTask(FROM_HERE, prefetch_cb);
return;
}
+ DVLOG(1) << __FUNCTION__ << " : requesting data";
RequestData(prefetch_cb);
}
bool MediaDecoderJob::Decode(
const base::TimeTicks& start_time_ticks,
const base::TimeDelta& start_presentation_timestamp,
- const MediaDecoderJob::DecoderCallback& callback) {
+ const DecoderCallback& callback) {
DCHECK(decode_cb_.is_null());
DCHECK(on_data_received_cb_.is_null());
DCHECK(ui_loop_->BelongsToCurrentThread());
@@ -114,17 +126,32 @@ void MediaDecoderJob::Flush() {
on_data_received_cb_.Reset();
}
+void MediaDecoderJob::BeginPrerolling(
+ const base::TimeDelta& preroll_timestamp) {
+ DVLOG(1) << __FUNCTION__ << "(" << preroll_timestamp.InSecondsF() << ")";
+ DCHECK(ui_loop_->BelongsToCurrentThread());
+ DCHECK(!is_decoding());
+
+ preroll_timestamp_ = preroll_timestamp;
+ prerolling_ = true;
+}
+
void MediaDecoderJob::Release() {
DCHECK(ui_loop_->BelongsToCurrentThread());
+ DVLOG(1) << __FUNCTION__;
- destroy_pending_ = is_decoding();
+ // If the decoder job is not waiting for data, and is still decoding, we
+ // cannot delete the job immediately.
+ destroy_pending_ = on_data_received_cb_.is_null() && is_decoding();
request_data_cb_.Reset();
on_data_received_cb_.Reset();
decode_cb_.Reset();
- if (destroy_pending_)
+ if (destroy_pending_) {
+ DVLOG(1) << __FUNCTION__ << " : delete is pending decode completion";
return;
+ }
delete this;
}
@@ -132,6 +159,7 @@ void MediaDecoderJob::Release() {
MediaCodecStatus MediaDecoderJob::QueueInputBuffer(const AccessUnit& unit) {
DVLOG(1) << __FUNCTION__;
DCHECK(decoder_loop_->BelongsToCurrentThread());
+ TRACE_EVENT0("media", __FUNCTION__);
int input_buf_index = input_buf_index_;
input_buf_index_ = -1;
@@ -198,6 +226,8 @@ void MediaDecoderJob::RequestData(const base::Closure& done_cb) {
DCHECK(on_data_received_cb_.is_null());
DCHECK(!input_eos_encountered_);
+ TRACE_EVENT_ASYNC_BEGIN0("media", "MediaDecoderJob::RequestData", this);
+
received_data_ = DemuxerData();
access_unit_index_ = 0;
on_data_received_cb_ = done_cb;
@@ -211,6 +241,19 @@ void MediaDecoderJob::DecodeNextAccessUnit(
DCHECK(ui_loop_->BelongsToCurrentThread());
DCHECK(!decode_cb_.is_null());
+ // If the first access unit is a config change, request the player to dequeue
+ // the input buffer again so that it can request config data.
+ if (received_data_.access_units[access_unit_index_].status ==
+ DemuxerStream::kConfigChanged) {
+ ui_loop_->PostTask(FROM_HERE,
+ base::Bind(&MediaDecoderJob::OnDecodeCompleted,
+ base::Unretained(this),
+ MEDIA_CODEC_DEQUEUE_INPUT_AGAIN_LATER,
+ kNoTimestamp(),
+ 0));
+ return;
+ }
+
decoder_loop_->PostTask(FROM_HERE, base::Bind(
&MediaDecoderJob::DecodeInternal, base::Unretained(this),
received_data_.access_units[access_unit_index_],
@@ -228,31 +271,49 @@ void MediaDecoderJob::DecodeInternal(
const MediaDecoderJob::DecoderCallback& callback) {
DVLOG(1) << __FUNCTION__;
DCHECK(decoder_loop_->BelongsToCurrentThread());
+ TRACE_EVENT0("media", __FUNCTION__);
if (needs_flush) {
DVLOG(1) << "DecodeInternal needs flush.";
input_eos_encountered_ = false;
+ output_eos_encountered_ = false;
MediaCodecStatus reset_status = media_codec_bridge_->Reset();
if (MEDIA_CODEC_OK != reset_status) {
- callback.Run(reset_status, start_presentation_timestamp, 0);
+ callback.Run(reset_status, kNoTimestamp(), 0);
return;
}
}
+ // Once output EOS has occurred, we should not be asked to decode again.
+ // MediaCodec has undefined behavior if similarly asked to decode after output
+ // EOS.
+ DCHECK(!output_eos_encountered_);
+
// For aborted access unit, just skip it and inform the player.
if (unit.status == DemuxerStream::kAborted) {
// TODO(qinmin): use a new enum instead of MEDIA_CODEC_STOPPED.
- callback.Run(MEDIA_CODEC_STOPPED, start_presentation_timestamp, 0);
+ callback.Run(MEDIA_CODEC_STOPPED, kNoTimestamp(), 0);
return;
}
+ if (skip_eos_enqueue_) {
+ if (unit.end_of_stream || unit.data.empty()) {
+ input_eos_encountered_ = true;
+ output_eos_encountered_ = true;
+ callback.Run(MEDIA_CODEC_OUTPUT_END_OF_STREAM, kNoTimestamp(), 0);
+ return;
+ }
+
+ skip_eos_enqueue_ = false;
+ }
+
MediaCodecStatus input_status = MEDIA_CODEC_INPUT_END_OF_STREAM;
if (!input_eos_encountered_) {
input_status = QueueInputBuffer(unit);
if (input_status == MEDIA_CODEC_INPUT_END_OF_STREAM) {
input_eos_encountered_ = true;
} else if (input_status != MEDIA_CODEC_OK) {
- callback.Run(input_status, start_presentation_timestamp, 0);
+ callback.Run(input_status, kNoTimestamp(), 0);
return;
}
}
@@ -261,55 +322,76 @@ void MediaDecoderJob::DecodeInternal(
size_t offset = 0;
size_t size = 0;
base::TimeDelta presentation_timestamp;
- bool output_eos_encountered = false;
base::TimeDelta timeout = base::TimeDelta::FromMilliseconds(
kMediaCodecTimeoutInMilliseconds);
- MediaCodecStatus status = media_codec_bridge_->DequeueOutputBuffer(
- timeout, &buffer_index, &offset, &size, &presentation_timestamp,
- &output_eos_encountered);
+ MediaCodecStatus status =
+ media_codec_bridge_->DequeueOutputBuffer(timeout,
+ &buffer_index,
+ &offset,
+ &size,
+ &presentation_timestamp,
+ &output_eos_encountered_,
+ NULL);
if (status != MEDIA_CODEC_OK) {
- if (status == MEDIA_CODEC_OUTPUT_BUFFERS_CHANGED) {
- if (media_codec_bridge_->GetOutputBuffers())
- status = MEDIA_CODEC_OK;
- else
- status = MEDIA_CODEC_ERROR;
+ if (status == MEDIA_CODEC_OUTPUT_BUFFERS_CHANGED &&
+ !media_codec_bridge_->GetOutputBuffers()) {
+ status = MEDIA_CODEC_ERROR;
}
- callback.Run(status, start_presentation_timestamp, 0);
+ callback.Run(status, kNoTimestamp(), 0);
return;
}
// TODO(xhwang/qinmin): This logic is correct but strange. Clean it up.
- if (output_eos_encountered)
+ if (output_eos_encountered_)
status = MEDIA_CODEC_OUTPUT_END_OF_STREAM;
else if (input_status == MEDIA_CODEC_INPUT_END_OF_STREAM)
status = MEDIA_CODEC_INPUT_END_OF_STREAM;
+ // Check whether we need to render the output.
+ // TODO(qinmin): comparing most recently queued input's |unit.timestamp| with
+ // |preroll_timestamp_| is not accurate due to data reordering and possible
+ // input queueing without immediate dequeue when |input_status| !=
+ // |MEDIA_CODEC_OK|. Need to use the |presentation_timestamp| for video, and
+ // use |size| to calculate the timestamp for audio. See
+ // http://crbug.com/310823 and http://b/11356652.
+ bool render_output = unit.timestamp >= preroll_timestamp_ &&
+ (status != MEDIA_CODEC_OUTPUT_END_OF_STREAM || size != 0u);
base::TimeDelta time_to_render;
DCHECK(!start_time_ticks.is_null());
- if (ComputeTimeToRender()) {
+ if (render_output && ComputeTimeToRender()) {
time_to_render = presentation_timestamp - (base::TimeTicks::Now() -
start_time_ticks + start_presentation_timestamp);
}
- // TODO(acolwell): Change to > since the else will never run for audio.
- if (time_to_render >= base::TimeDelta()) {
+ if (time_to_render > base::TimeDelta()) {
decoder_loop_->PostDelayedTask(
FROM_HERE,
base::Bind(&MediaDecoderJob::ReleaseOutputBuffer,
- weak_this_.GetWeakPtr(), buffer_index, size,
- presentation_timestamp, callback, status),
+ weak_this_.GetWeakPtr(), buffer_index, size, render_output,
+ base::Bind(callback, status, presentation_timestamp)),
time_to_render);
return;
}
// TODO(qinmin): The codec is lagging behind, need to recalculate the
- // |start_presentation_timestamp_| and |start_time_ticks_|.
+ // |start_presentation_timestamp_| and |start_time_ticks_| in
+ // media_source_player.cc.
DVLOG(1) << "codec is lagging behind :" << time_to_render.InMicroseconds();
- ReleaseOutputBuffer(buffer_index, size, presentation_timestamp,
- callback, status);
+ if (render_output) {
+ // The player won't expect a timestamp smaller than the
+ // |start_presentation_timestamp|. However, this could happen due to decoder
+ // errors.
+ presentation_timestamp = std::max(
+ presentation_timestamp, start_presentation_timestamp);
+ } else {
+ presentation_timestamp = kNoTimestamp();
+ }
+ ReleaseOutputCompletionCallback completion_callback = base::Bind(
+ callback, status, presentation_timestamp);
+ ReleaseOutputBuffer(buffer_index, size, render_output, completion_callback);
}
void MediaDecoderJob::OnDecodeCompleted(
@@ -318,11 +400,17 @@ void MediaDecoderJob::OnDecodeCompleted(
DCHECK(ui_loop_->BelongsToCurrentThread());
if (destroy_pending_) {
+ DVLOG(1) << __FUNCTION__ << " : completing pending deletion";
delete this;
return;
}
DCHECK(!decode_cb_.is_null());
+
+ // If output was queued for rendering, then we have completed prerolling.
+ if (presentation_timestamp != kNoTimestamp())
+ prerolling_ = false;
+
switch (status) {
case MEDIA_CODEC_OK:
case MEDIA_CODEC_DEQUEUE_OUTPUT_AGAIN_LATER:
diff --git a/chromium/media/base/android/media_decoder_job.h b/chromium/media/base/android/media_decoder_job.h
index d5a93b977c7..6ee086dea03 100644
--- a/chromium/media/base/android/media_decoder_job.h
+++ b/chromium/media/base/android/media_decoder_job.h
@@ -27,8 +27,14 @@ class MediaDecoderJob {
// Callback when a decoder job finishes its work. Args: whether decode
// finished successfully, presentation time, audio output bytes.
+ // If the presentation time is equal to kNoTimestamp(), the decoder job
+ // skipped rendering of the decoded output and the callback target should
+ // update its clock to avoid introducing extra delays to the next frame.
typedef base::Callback<void(MediaCodecStatus, const base::TimeDelta&,
size_t)> DecoderCallback;
+ // Callback when a decoder job finishes releasing the output buffer.
+ // Args: audio output bytes, must be 0 for video.
+ typedef base::Callback<void(size_t)> ReleaseOutputCompletionCallback;
virtual ~MediaDecoderJob();
@@ -63,6 +69,11 @@ class MediaDecoderJob {
// Flush the decoder.
void Flush();
+ // Enter prerolling state. The job must not currently be decoding.
+ void BeginPrerolling(const base::TimeDelta& preroll_timestamp);
+
+ bool prerolling() const { return prerolling_; }
+
bool is_decoding() const { return !decode_cb_.is_null(); }
protected:
@@ -70,12 +81,13 @@ class MediaDecoderJob {
MediaCodecBridge* media_codec_bridge,
const base::Closure& request_data_cb);
- // Release the output buffer and render it.
+ // Release the output buffer at index |output_buffer_index| and render it if
+ // |render_output| is true. Upon completion, |callback| will be called.
virtual void ReleaseOutputBuffer(
- int outputBufferIndex, size_t size,
- const base::TimeDelta& presentation_timestamp,
- const DecoderCallback& callback,
- MediaCodecStatus status) = 0;
+ int output_buffer_index,
+ size_t size,
+ bool render_output,
+ const ReleaseOutputCompletionCallback& callback) = 0;
// Returns true if the "time to render" needs to be computed for frames in
// this decoder job.
@@ -112,6 +124,8 @@ class MediaDecoderJob {
const DecoderCallback& callback);
// Called on the UI thread to indicate that one decode cycle has completed.
+ // Completes any pending job destruction or any pending decode stop. If
+ // destruction was not pending, passes its arguments to |decode_cb_|.
void OnDecodeCompleted(MediaCodecStatus status,
const base::TimeDelta& presentation_timestamp,
size_t audio_output_bytes);
@@ -130,8 +144,31 @@ class MediaDecoderJob {
bool needs_flush_;
// Whether input EOS is encountered.
+ // TODO(wolenetz/qinmin): Protect with a lock. See http://crbug.com/320043.
bool input_eos_encountered_;
+ // Whether output EOS is encountered.
+ bool output_eos_encountered_;
+
+ // Tracks whether DecodeInternal() should skip decoding if the first access
+ // unit is EOS or empty, and report |MEDIA_CODEC_OUTPUT_END_OF_STREAM|. This
+ // is to work around some decoders that could crash otherwise. See
+ // http://b/11696552.
+ bool skip_eos_enqueue_;
+
+ // The timestamp the decoder needs to preroll to. If an access unit's
+ // timestamp is smaller than |preroll_timestamp_|, don't render it.
+ // TODO(qinmin): Comparing access unit's timestamp with |preroll_timestamp_|
+ // is not very accurate.
+ base::TimeDelta preroll_timestamp_;
+
+ // Indicates prerolling state. If true, this job has not yet decoded output
+ // that it will render, since the most recent of job construction or
+ // BeginPrerolling(). If false, |preroll_timestamp_| has been reached.
+ // TODO(qinmin): Comparing access unit's timestamp with |preroll_timestamp_|
+ // is not very accurate.
+ bool prerolling_;
+
// Weak pointer passed to media decoder jobs for callbacks. It is bounded to
// the decoder thread.
base::WeakPtrFactory<MediaDecoderJob> weak_this_;
diff --git a/chromium/media/base/android/media_drm_bridge.cc b/chromium/media/base/android/media_drm_bridge.cc
index 95f38eddef9..95085fea4ed 100644
--- a/chromium/media/base/android/media_drm_bridge.cc
+++ b/chromium/media/base/android/media_drm_bridge.cc
@@ -11,6 +11,7 @@
#include "base/location.h"
#include "base/logging.h"
#include "base/message_loop/message_loop_proxy.h"
+#include "base/strings/string_util.h"
#include "jni/MediaDrmBridge_jni.h"
#include "media/base/android/media_player_manager.h"
@@ -141,8 +142,6 @@ static bool GetPsshData(const uint8* data, int data_size,
return false;
}
-bool MediaDrmBridge::can_use_media_drm_ = false;
-
static MediaDrmBridge::SecurityLevel GetSecurityLevelFromString(
const std::string& security_level_str) {
if (0 == security_level_str.compare("L1"))
@@ -175,8 +174,7 @@ scoped_ptr<MediaDrmBridge> MediaDrmBridge::Create(
// static
bool MediaDrmBridge::IsAvailable() {
- return can_use_media_drm_ &&
- base::android::BuildInfo::GetInstance()->sdk_int() >= 18;
+ return base::android::BuildInfo::GetInstance()->sdk_int() >= 19;
}
// static
@@ -238,9 +236,10 @@ MediaDrmBridge::~MediaDrmBridge() {
Java_MediaDrmBridge_release(env, j_media_drm_.obj());
}
-bool MediaDrmBridge::GenerateKeyRequest(const std::string& type,
- const uint8* init_data,
- int init_data_length) {
+bool MediaDrmBridge::CreateSession(uint32 session_id,
+ const std::string& type,
+ const uint8* init_data,
+ int init_data_length) {
std::vector<uint8> pssh_data;
if (!GetPsshData(init_data, init_data_length, scheme_uuid_, &pssh_data))
return false;
@@ -249,30 +248,26 @@ bool MediaDrmBridge::GenerateKeyRequest(const std::string& type,
ScopedJavaLocalRef<jbyteArray> j_pssh_data =
base::android::ToJavaByteArray(env, &pssh_data[0], pssh_data.size());
ScopedJavaLocalRef<jstring> j_mime = ConvertUTF8ToJavaString(env, type);
- Java_MediaDrmBridge_generateKeyRequest(
- env, j_media_drm_.obj(), j_pssh_data.obj(), j_mime.obj());
+ Java_MediaDrmBridge_createSession(
+ env, j_media_drm_.obj(), session_id, j_pssh_data.obj(), j_mime.obj());
return true;
}
-void MediaDrmBridge::AddKey(const uint8* key, int key_length,
- const uint8* init_data, int init_data_length,
- const std::string& session_id) {
+void MediaDrmBridge::UpdateSession(uint32 session_id,
+ const uint8* response,
+ int response_length) {
DVLOG(1) << __FUNCTION__;
JNIEnv* env = AttachCurrentThread();
- ScopedJavaLocalRef<jbyteArray> j_key_data =
- base::android::ToJavaByteArray(env, key, key_length);
- ScopedJavaLocalRef<jstring> j_session_id =
- ConvertUTF8ToJavaString(env, session_id);
- Java_MediaDrmBridge_addKey(
- env, j_media_drm_.obj(), j_session_id.obj(), j_key_data.obj());
+ ScopedJavaLocalRef<jbyteArray> j_response =
+ base::android::ToJavaByteArray(env, response, response_length);
+ Java_MediaDrmBridge_updateSession(
+ env, j_media_drm_.obj(), session_id, j_response.obj());
}
-void MediaDrmBridge::CancelKeyRequest(const std::string& session_id) {
+void MediaDrmBridge::ReleaseSession(uint32 session_id) {
+ DVLOG(1) << __FUNCTION__;
JNIEnv* env = AttachCurrentThread();
- ScopedJavaLocalRef<jstring> j_session_id =
- ConvertUTF8ToJavaString(env, session_id);
- Java_MediaDrmBridge_cancelKeyRequest(
- env, j_media_drm_.obj(), j_session_id.obj());
+ Java_MediaDrmBridge_releaseSession(env, j_media_drm_.obj(), session_id);
}
void MediaDrmBridge::SetMediaCryptoReadyCB(const base::Closure& closure) {
@@ -297,28 +292,48 @@ void MediaDrmBridge::OnMediaCryptoReady(JNIEnv* env, jobject) {
base::ResetAndReturn(&media_crypto_ready_cb_).Run();
}
-void MediaDrmBridge::OnKeyMessage(JNIEnv* env,
- jobject j_media_drm,
- jstring j_session_id,
- jbyteArray j_message,
- jstring j_destination_url) {
- std::string session_id = ConvertJavaStringToUTF8(env, j_session_id);
+void MediaDrmBridge::OnSessionCreated(JNIEnv* env,
+ jobject j_media_drm,
+ jint j_session_id,
+ jstring j_web_session_id) {
+ uint32 session_id = j_session_id;
+ std::string web_session_id = ConvertJavaStringToUTF8(env, j_web_session_id);
+ manager_->OnSessionCreated(media_keys_id_, session_id, web_session_id);
+}
+
+void MediaDrmBridge::OnSessionMessage(JNIEnv* env,
+ jobject j_media_drm,
+ jint j_session_id,
+ jbyteArray j_message,
+ jstring j_destination_url) {
+ uint32 session_id = j_session_id;
std::vector<uint8> message;
JavaByteArrayToByteVector(env, j_message, &message);
std::string destination_url = ConvertJavaStringToUTF8(env, j_destination_url);
+ manager_->OnSessionMessage(
+ media_keys_id_, session_id, message, destination_url);
+}
- manager_->OnKeyMessage(media_keys_id_, session_id, message, destination_url);
+void MediaDrmBridge::OnSessionReady(JNIEnv* env,
+ jobject j_media_drm,
+ jint j_session_id) {
+ uint32 session_id = j_session_id;
+ manager_->OnSessionReady(media_keys_id_, session_id);
}
-void MediaDrmBridge::OnKeyAdded(JNIEnv* env, jobject, jstring j_session_id) {
- std::string session_id = ConvertJavaStringToUTF8(env, j_session_id);
- manager_->OnKeyAdded(media_keys_id_, session_id);
+void MediaDrmBridge::OnSessionClosed(JNIEnv* env,
+ jobject j_media_drm,
+ jint j_session_id) {
+ uint32 session_id = j_session_id;
+ manager_->OnSessionClosed(media_keys_id_, session_id);
}
-void MediaDrmBridge::OnKeyError(JNIEnv* env, jobject, jstring j_session_id) {
- // |j_session_id| can be NULL, in which case we'll return an empty string.
- std::string session_id = ConvertJavaStringToUTF8(env, j_session_id);
- manager_->OnKeyError(media_keys_id_, session_id, MediaKeys::kUnknownError, 0);
+void MediaDrmBridge::OnSessionError(JNIEnv* env,
+ jobject j_media_drm,
+ jint j_session_id) {
+ uint32 session_id = j_session_id;
+ manager_->OnSessionError(
+ media_keys_id_, session_id, MediaKeys::kUnknownError, 0);
}
ScopedJavaLocalRef<jobject> MediaDrmBridge::GetMediaCrypto() {
diff --git a/chromium/media/base/android/media_drm_bridge.h b/chromium/media/base/android/media_drm_bridge.h
index 0399beb1d7d..76149a6a609 100644
--- a/chromium/media/base/android/media_drm_bridge.h
+++ b/chromium/media/base/android/media_drm_bridge.h
@@ -6,6 +6,8 @@
#define MEDIA_BASE_ANDROID_MEDIA_DRM_BRIDGE_H_
#include <jni.h>
+#include <map>
+#include <queue>
#include <string>
#include <vector>
@@ -59,13 +61,14 @@ class MEDIA_EXPORT MediaDrmBridge : public MediaKeys {
static bool RegisterMediaDrmBridge(JNIEnv* env);
// MediaKeys implementations.
- virtual bool GenerateKeyRequest(const std::string& type,
- const uint8* init_data,
- int init_data_length) OVERRIDE;
- virtual void AddKey(const uint8* key, int key_length,
- const uint8* init_data, int init_data_length,
- const std::string& session_id) OVERRIDE;
- virtual void CancelKeyRequest(const std::string& session_id) OVERRIDE;
+ virtual bool CreateSession(uint32 session_id,
+ const std::string& type,
+ const uint8* init_data,
+ int init_data_length) OVERRIDE;
+ virtual void UpdateSession(uint32 session_id,
+ const uint8* response,
+ int response_length) OVERRIDE;
+ virtual void ReleaseSession(uint32 session_id) OVERRIDE;
// Returns a MediaCrypto object if it's already created. Returns a null object
// otherwise.
@@ -76,17 +79,21 @@ class MEDIA_EXPORT MediaDrmBridge : public MediaKeys {
void SetMediaCryptoReadyCB(const base::Closure& closure);
// Called after a MediaCrypto object is created.
- void OnMediaCryptoReady(JNIEnv* env, jobject);
-
- // Called after we got the response for GenerateKeyRequest().
- void OnKeyMessage(JNIEnv* env, jobject, jstring j_session_id,
- jbyteArray message, jstring destination_url);
-
- // Called when key is added.
- void OnKeyAdded(JNIEnv* env, jobject, jstring j_session_id);
-
- // Called when error happens.
- void OnKeyError(JNIEnv* env, jobject, jstring j_session_id);
+ void OnMediaCryptoReady(JNIEnv* env, jobject j_media_drm);
+
+ // Callbacks for firing session events.
+ void OnSessionCreated(JNIEnv* env,
+ jobject j_media_drm,
+ jint j_session_id,
+ jstring j_web_session_id);
+ void OnSessionMessage(JNIEnv* env,
+ jobject j_media_drm,
+ jint j_session_id,
+ jbyteArray j_message,
+ jstring j_destination_url);
+ void OnSessionReady(JNIEnv* env, jobject j_media_drm, jint j_session_id);
+ void OnSessionClosed(JNIEnv* env, jobject j_media_drm, jint j_session_id);
+ void OnSessionError(JNIEnv* env, jobject j_media_drm, jint j_session_id);
// Reset the device credentials.
void ResetDeviceCredentials(const ResetCredentialsCB& callback);
@@ -102,15 +109,9 @@ class MEDIA_EXPORT MediaDrmBridge : public MediaKeys {
GURL frame_url() const { return frame_url_; }
- static void set_can_use_media_drm(bool can_use_media_drm) {
- can_use_media_drm_ = can_use_media_drm;
- }
-
private:
static bool IsSecureDecoderRequired(SecurityLevel security_level);
- static bool can_use_media_drm_;
-
MediaDrmBridge(int media_keys_id,
const std::vector<uint8>& scheme_uuid,
const GURL& frame_url,
diff --git a/chromium/media/base/android/media_player_android.cc b/chromium/media/base/android/media_player_android.cc
index 101ab436df1..c2e00947af6 100644
--- a/chromium/media/base/android/media_player_android.cc
+++ b/chromium/media/base/android/media_player_android.cc
@@ -19,48 +19,8 @@ MediaPlayerAndroid::MediaPlayerAndroid(
MediaPlayerAndroid::~MediaPlayerAndroid() {}
-void MediaPlayerAndroid::OnMediaError(int error_type) {
- manager_->OnError(player_id_, error_type);
-}
-
-void MediaPlayerAndroid::OnVideoSizeChanged(int width, int height) {
- manager_->OnVideoSizeChanged(player_id_, width, height);
-}
-
-void MediaPlayerAndroid::OnBufferingUpdate(int percent) {
- manager_->OnBufferingUpdate(player_id_, percent);
-}
-
-void MediaPlayerAndroid::OnPlaybackComplete() {
- manager_->OnPlaybackComplete(player_id_);
-}
-
-void MediaPlayerAndroid::OnMediaInterrupted() {
- manager_->OnMediaInterrupted(player_id_);
-}
-
-void MediaPlayerAndroid::OnSeekComplete() {
- manager_->OnSeekComplete(player_id_, GetCurrentTime());
-}
-
-void MediaPlayerAndroid::OnTimeUpdated() {
- manager_->OnTimeUpdate(player_id_, GetCurrentTime());
-}
-
-void MediaPlayerAndroid::OnMediaMetadataChanged(
- base::TimeDelta duration, int width, int height, bool success) {
- manager_->OnMediaMetadataChanged(
- player_id_, duration, width, height, success);
-}
-
-void MediaPlayerAndroid::RequestMediaResourcesFromManager() {
- if (manager_)
- manager_->RequestMediaResources(player_id_);
-}
-
-void MediaPlayerAndroid::ReleaseMediaResourcesFromManager() {
- if (manager_)
- manager_->ReleaseMediaResources(player_id_);
+bool MediaPlayerAndroid::IsRemote() const {
+ return false;
}
GURL MediaPlayerAndroid::GetUrl() {
diff --git a/chromium/media/base/android/media_player_android.h b/chromium/media/base/android/media_player_android.h
index 0968d3513f9..27a6432d9e2 100644
--- a/chromium/media/base/android/media_player_android.h
+++ b/chromium/media/base/android/media_player_android.h
@@ -43,9 +43,10 @@ class MEDIA_EXPORT MediaPlayerAndroid {
// Pause the media.
virtual void Pause(bool is_media_related_action) = 0;
- // Seek to a particular position. When succeeds, OnSeekComplete() will be
- // called. Otherwise, nothing will happen.
- virtual void SeekTo(base::TimeDelta time) = 0;
+ // Seek to a particular position, based on renderer signaling actual seek
+ // with MediaPlayerHostMsg_Seek. If eventual success, OnSeekComplete() will be
+ // called.
+ virtual void SeekTo(const base::TimeDelta& timestamp) = 0;
// Release the player resources.
virtual void Release() = 0;
@@ -54,6 +55,7 @@ class MEDIA_EXPORT MediaPlayerAndroid {
virtual void SetVolume(double volume) = 0;
// Get the media information from the player.
+ virtual bool IsRemote() const;
virtual int GetVideoWidth() = 0;
virtual int GetVideoHeight() = 0;
virtual base::TimeDelta GetDuration() = 0;
@@ -79,21 +81,6 @@ class MEDIA_EXPORT MediaPlayerAndroid {
MediaPlayerAndroid(int player_id,
MediaPlayerManager* manager);
- // Called when player status changes.
- virtual void OnMediaError(int error_type);
- virtual void OnVideoSizeChanged(int width, int height);
- virtual void OnBufferingUpdate(int percent);
- virtual void OnPlaybackComplete();
- virtual void OnSeekComplete();
- virtual void OnMediaMetadataChanged(
- base::TimeDelta duration, int width, int height, bool success);
- virtual void OnMediaInterrupted();
- virtual void OnTimeUpdated();
-
- // Request or release decoding resources from |manager_|.
- virtual void RequestMediaResourcesFromManager();
- virtual void ReleaseMediaResourcesFromManager();
-
MediaPlayerManager* manager() { return manager_; }
private:
diff --git a/chromium/media/base/android/media_player_bridge.cc b/chromium/media/base/android/media_player_bridge.cc
index f570bdc8677..0f79c13ad93 100644
--- a/chromium/media/base/android/media_player_bridge.cc
+++ b/chromium/media/base/android/media_player_bridge.cc
@@ -9,6 +9,7 @@
#include "base/basictypes.h"
#include "base/logging.h"
#include "base/message_loop/message_loop_proxy.h"
+#include "base/strings/string_util.h"
#include "jni/MediaPlayerBridge_jni.h"
#include "media/base/android/media_player_manager.h"
#include "media/base/android/media_resource_getter.h"
@@ -45,6 +46,11 @@ MediaPlayerBridge::MediaPlayerBridge(
}
MediaPlayerBridge::~MediaPlayerBridge() {
+ if (!j_media_player_bridge_.is_null()) {
+ JNIEnv* env = base::android::AttachCurrentThread();
+ CHECK(env);
+ Java_MediaPlayerBridge_destroy(env, j_media_player_bridge_.obj());
+ }
Release();
}
@@ -72,7 +78,8 @@ void MediaPlayerBridge::CreateJavaMediaPlayerBridge() {
JNIEnv* env = base::android::AttachCurrentThread();
CHECK(env);
- j_media_player_bridge_.Reset(Java_MediaPlayerBridge_create(env));
+ j_media_player_bridge_.Reset(Java_MediaPlayerBridge_create(
+ env, reinterpret_cast<intptr_t>(this)));
SetMediaPlayerListener();
}
@@ -85,6 +92,13 @@ void MediaPlayerBridge::SetJavaMediaPlayerBridge(
j_media_player_bridge_.Reset(env, j_media_player_bridge);
}
+base::android::ScopedJavaLocalRef<jobject> MediaPlayerBridge::
+ GetJavaMediaPlayerBridge() {
+ base::android::ScopedJavaLocalRef<jobject> j_bridge(
+ j_media_player_bridge_);
+ return j_bridge;
+}
+
void MediaPlayerBridge::SetMediaPlayerListener() {
jobject j_context = base::android::GetApplicationContext();
DCHECK(j_context);
@@ -111,8 +125,8 @@ void MediaPlayerBridge::SetVideoSurface(gfx::ScopedJavaSurface surface) {
}
void MediaPlayerBridge::Prepare() {
- if (j_media_player_bridge_.is_null())
- CreateJavaMediaPlayerBridge();
+ DCHECK(j_media_player_bridge_.is_null());
+ CreateJavaMediaPlayerBridge();
if (url_.SchemeIsFileSystem()) {
manager()->GetMediaResourceGetter()->GetPlatformPathFromFileSystemURL(
url_, base::Bind(&MediaPlayerBridge::SetDataSource,
@@ -137,15 +151,37 @@ void MediaPlayerBridge::SetDataSource(const std::string& url) {
jobject j_context = base::android::GetApplicationContext();
DCHECK(j_context);
- if (Java_MediaPlayerBridge_setDataSource(
+ const std::string data_uri_prefix("data:");
+ if (StartsWithASCII(url, data_uri_prefix, true)) {
+ if (!Java_MediaPlayerBridge_setDataUriDataSource(
+ env, j_media_player_bridge_.obj(), j_context, j_url_string.obj())) {
+ OnMediaError(MEDIA_ERROR_FORMAT);
+ }
+ return;
+ }
+
+ if (!Java_MediaPlayerBridge_setDataSource(
env, j_media_player_bridge_.obj(), j_context, j_url_string.obj(),
j_cookies.obj(), hide_url_log_)) {
- RequestMediaResourcesFromManager();
- Java_MediaPlayerBridge_prepareAsync(
- env, j_media_player_bridge_.obj());
- } else {
OnMediaError(MEDIA_ERROR_FORMAT);
+ return;
}
+
+ manager()->RequestMediaResources(player_id());
+ if (!Java_MediaPlayerBridge_prepareAsync(env, j_media_player_bridge_.obj()))
+ OnMediaError(MEDIA_ERROR_FORMAT);
+}
+
+void MediaPlayerBridge::OnDidSetDataUriDataSource(JNIEnv* env, jobject obj,
+ jboolean success) {
+ if (!success) {
+ OnMediaError(MEDIA_ERROR_FORMAT);
+ return;
+ }
+
+ manager()->RequestMediaResources(player_id());
+ if (!Java_MediaPlayerBridge_prepareAsync(env, j_media_player_bridge_.obj()))
+ OnMediaError(MEDIA_ERROR_FORMAT);
}
void MediaPlayerBridge::OnCookiesRetrieved(const std::string& cookies) {
@@ -166,7 +202,8 @@ void MediaPlayerBridge::OnMediaMetadataExtracted(
width_ = width;
height_ = height;
}
- OnMediaMetadataChanged(duration_, width_, height_, success);
+ manager()->OnMediaMetadataChanged(
+ player_id(), duration_, width_, height_, success);
}
void MediaPlayerBridge::Start() {
@@ -219,14 +256,14 @@ int MediaPlayerBridge::GetVideoHeight() {
env, j_media_player_bridge_.obj());
}
-void MediaPlayerBridge::SeekTo(base::TimeDelta time) {
+void MediaPlayerBridge::SeekTo(const base::TimeDelta& timestamp) {
// Record the time to seek when OnMediaPrepared() is called.
- pending_seek_ = time;
+ pending_seek_ = timestamp;
if (j_media_player_bridge_.is_null())
Prepare();
else if (prepared_)
- SeekInternal(time);
+ SeekInternal(timestamp);
}
base::TimeDelta MediaPlayerBridge::GetCurrentTime() {
@@ -261,7 +298,7 @@ void MediaPlayerBridge::Release() {
JNIEnv* env = base::android::AttachCurrentThread();
Java_MediaPlayerBridge_release(env, j_media_player_bridge_.obj());
j_media_player_bridge_.Reset();
- ReleaseMediaResourcesFromManager();
+ manager()->ReleaseMediaResources(player_id());
listener_.ReleaseMediaPlayerListenerResources();
}
@@ -278,17 +315,29 @@ void MediaPlayerBridge::SetVolume(double volume) {
void MediaPlayerBridge::OnVideoSizeChanged(int width, int height) {
width_ = width;
height_ = height;
- MediaPlayerAndroid::OnVideoSizeChanged(width, height);
+ manager()->OnVideoSizeChanged(player_id(), width, height);
+}
+
+void MediaPlayerBridge::OnMediaError(int error_type) {
+ manager()->OnError(player_id(), error_type);
+}
+
+void MediaPlayerBridge::OnBufferingUpdate(int percent) {
+ manager()->OnBufferingUpdate(player_id(), percent);
}
void MediaPlayerBridge::OnPlaybackComplete() {
time_update_timer_.Stop();
- MediaPlayerAndroid::OnPlaybackComplete();
+ manager()->OnPlaybackComplete(player_id());
}
void MediaPlayerBridge::OnMediaInterrupted() {
time_update_timer_.Stop();
- MediaPlayerAndroid::OnMediaInterrupted();
+ manager()->OnMediaInterrupted(player_id());
+}
+
+void MediaPlayerBridge::OnSeekComplete() {
+ manager()->OnSeekComplete(player_id(), GetCurrentTime());
}
void MediaPlayerBridge::OnMediaPrepared() {
@@ -307,17 +356,25 @@ void MediaPlayerBridge::OnMediaPrepared() {
pending_play_ = false;
}
- GetAllowedOperations();
- OnMediaMetadataChanged(duration_, width_, height_, true);
+ UpdateAllowedOperations();
+ manager()->OnMediaMetadataChanged(
+ player_id(), duration_, width_, height_, true);
+}
+
+ScopedJavaLocalRef<jobject> MediaPlayerBridge::GetAllowedOperations() {
+ JNIEnv* env = base::android::AttachCurrentThread();
+ CHECK(env);
+
+ return Java_MediaPlayerBridge_getAllowedOperations(
+ env, j_media_player_bridge_.obj());
}
-void MediaPlayerBridge::GetAllowedOperations() {
+void MediaPlayerBridge::UpdateAllowedOperations() {
JNIEnv* env = base::android::AttachCurrentThread();
CHECK(env);
- ScopedJavaLocalRef<jobject> allowedOperations =
- Java_MediaPlayerBridge_getAllowedOperations(
- env, j_media_player_bridge_.obj());
+ ScopedJavaLocalRef<jobject> allowedOperations = GetAllowedOperations();
+
can_pause_ = Java_AllowedOperations_canPause(env, allowedOperations.obj());
can_seek_forward_ = Java_AllowedOperations_canSeekForward(
env, allowedOperations.obj());
@@ -332,7 +389,7 @@ void MediaPlayerBridge::StartInternal() {
time_update_timer_.Start(
FROM_HERE,
base::TimeDelta::FromMilliseconds(kTimeUpdateInterval),
- this, &MediaPlayerBridge::OnTimeUpdated);
+ this, &MediaPlayerBridge::OnTimeUpdateTimerFired);
}
}
@@ -342,7 +399,7 @@ void MediaPlayerBridge::PauseInternal() {
time_update_timer_.Stop();
}
-void MediaPlayerBridge::PendingSeekInternal(base::TimeDelta time) {
+void MediaPlayerBridge::PendingSeekInternal(const base::TimeDelta& time) {
SeekInternal(time);
}
@@ -364,6 +421,10 @@ void MediaPlayerBridge::SeekInternal(base::TimeDelta time) {
env, j_media_player_bridge_.obj(), time_msec);
}
+void MediaPlayerBridge::OnTimeUpdateTimerFired() {
+ manager()->OnTimeUpdate(player_id(), GetCurrentTime());
+}
+
bool MediaPlayerBridge::RegisterMediaPlayerBridge(JNIEnv* env) {
bool ret = RegisterNativesImpl(env);
DCHECK(g_MediaPlayerBridge_clazz);
diff --git a/chromium/media/base/android/media_player_bridge.h b/chromium/media/base/android/media_player_bridge.h
index 7bd4beb082f..402cb49858b 100644
--- a/chromium/media/base/android/media_player_bridge.h
+++ b/chromium/media/base/android/media_player_bridge.h
@@ -56,7 +56,7 @@ class MEDIA_EXPORT MediaPlayerBridge : public MediaPlayerAndroid {
virtual void SetVideoSurface(gfx::ScopedJavaSurface surface) OVERRIDE;
virtual void Start() OVERRIDE;
virtual void Pause(bool is_media_related_action ALLOW_UNUSED) OVERRIDE;
- virtual void SeekTo(base::TimeDelta time) OVERRIDE;
+ virtual void SeekTo(const base::TimeDelta& timestamp) OVERRIDE;
virtual void Release() OVERRIDE;
virtual void SetVolume(double volume) OVERRIDE;
virtual int GetVideoWidth() OVERRIDE;
@@ -71,17 +71,22 @@ class MEDIA_EXPORT MediaPlayerBridge : public MediaPlayerAndroid {
virtual GURL GetUrl() OVERRIDE;
virtual GURL GetFirstPartyForCookies() OVERRIDE;
+ // MediaPlayerListener callbacks.
+ void OnVideoSizeChanged(int width, int height);
+ void OnMediaError(int error_type);
+ void OnBufferingUpdate(int percent);
+ void OnPlaybackComplete();
+ void OnMediaInterrupted();
+ void OnSeekComplete();
+ void OnDidSetDataUriDataSource(JNIEnv* env, jobject obj, jboolean success);
+
protected:
void SetJavaMediaPlayerBridge(jobject j_media_player_bridge);
+ base::android::ScopedJavaLocalRef<jobject> GetJavaMediaPlayerBridge();
void SetMediaPlayerListener();
void SetDuration(base::TimeDelta time);
- // MediaPlayerAndroid implementation.
- virtual void OnVideoSizeChanged(int width, int height) OVERRIDE;
- virtual void OnPlaybackComplete() OVERRIDE;
- virtual void OnMediaInterrupted() OVERRIDE;
-
- virtual void PendingSeekInternal(base::TimeDelta time);
+ virtual void PendingSeekInternal(const base::TimeDelta& time);
// Prepare the player for playback, asynchronously. When succeeds,
// OnMediaPrepared() will be called. Otherwise, OnMediaError() will
@@ -92,6 +97,9 @@ class MEDIA_EXPORT MediaPlayerBridge : public MediaPlayerAndroid {
// Create the corresponding Java class instance.
virtual void CreateJavaMediaPlayerBridge();
+ // Get allowed operations from the player.
+ virtual base::android::ScopedJavaLocalRef<jobject> GetAllowedOperations();
+
private:
// Set the data source for the media player.
void SetDataSource(const std::string& url);
@@ -101,8 +109,11 @@ class MEDIA_EXPORT MediaPlayerBridge : public MediaPlayerAndroid {
void PauseInternal();
void SeekInternal(base::TimeDelta time);
- // Get allowed operations from the player.
- void GetAllowedOperations();
+ // Called when |time_update_timer_| fires.
+ void OnTimeUpdateTimerFired();
+
+ // Update allowed operations from the player.
+ void UpdateAllowedOperations();
// Callback function passed to |resource_getter_|. Called when the cookies
// are retrieved.
diff --git a/chromium/media/base/android/media_player_manager.h b/chromium/media/base/android/media_player_manager.h
index 4ecac22518b..c215df59f4b 100644
--- a/chromium/media/base/android/media_player_manager.h
+++ b/chromium/media/base/android/media_player_manager.h
@@ -62,7 +62,9 @@ class MEDIA_EXPORT MediaPlayerManager {
virtual void OnBufferingUpdate(int player_id, int percentage) = 0;
// Called when seek completed. Args: player ID, current time.
- virtual void OnSeekComplete(int player_id, base::TimeDelta current_time) = 0;
+ virtual void OnSeekComplete(
+ int player_id,
+ const base::TimeDelta& current_time) = 0;
// Called when error happens. Args: player ID, error type.
virtual void OnError(int player_id, int error) = 0;
@@ -85,25 +87,33 @@ class MEDIA_EXPORT MediaPlayerManager {
// Called by the player to get a hardware protected surface.
virtual void OnProtectedSurfaceRequested(int player_id) = 0;
- // TODO(xhwang): The following three methods needs to be decoupled from
- // MediaPlayerManager to support the W3C Working Draft version of the EME
- // spec.
-
- // Called when MediaDrmBridge wants to send a KeyAdded.
- virtual void OnKeyAdded(int media_keys_id,
- const std::string& session_id) = 0;
-
- // Called when MediaDrmBridge wants to send a KeyError.
- virtual void OnKeyError(int media_keys_id,
- const std::string& session_id,
- media::MediaKeys::KeyError error_code,
- int system_code) = 0;
-
- // Called when MediaDrmBridge wants to send a KeyMessage.
- virtual void OnKeyMessage(int media_keys_id,
- const std::string& session_id,
- const std::vector<uint8>& message,
- const std::string& destination_url) = 0;
+ // The following five methods are related to EME.
+ // TODO(xhwang): These methods needs to be decoupled from MediaPlayerManager
+ // to support the W3C Working Draft version of the EME spec.
+ // http://crbug.com/315312
+
+ // Called when MediaDrmBridge determines a SessionId.
+ virtual void OnSessionCreated(int media_keys_id,
+ uint32 session_id,
+ const std::string& web_session_id) = 0;
+
+ // Called when MediaDrmBridge wants to send a Message event.
+ virtual void OnSessionMessage(int media_keys_id,
+ uint32 session_id,
+ const std::vector<uint8>& message,
+ const std::string& destination_url) = 0;
+
+ // Called when MediaDrmBridge wants to send a Ready event.
+ virtual void OnSessionReady(int media_keys_id, uint32 session_id) = 0;
+
+ // Called when MediaDrmBridge wants to send a Closed event.
+ virtual void OnSessionClosed(int media_keys_id, uint32 session_id) = 0;
+
+ // Called when MediaDrmBridge wants to send an Error event.
+ virtual void OnSessionError(int media_keys_id,
+ uint32 session_id,
+ media::MediaKeys::KeyError error_code,
+ int system_code) = 0;
};
} // namespace media
diff --git a/chromium/media/base/android/media_source_player.cc b/chromium/media/base/android/media_source_player.cc
index 223515e9992..ee84528a87c 100644
--- a/chromium/media/base/android/media_source_player.cc
+++ b/chromium/media/base/android/media_source_player.cc
@@ -11,12 +11,16 @@
#include "base/barrier_closure.h"
#include "base/basictypes.h"
#include "base/bind.h"
+#include "base/callback_helpers.h"
+#include "base/debug/trace_event.h"
#include "base/logging.h"
+#include "base/strings/string_number_conversions.h"
#include "media/base/android/audio_decoder_job.h"
#include "media/base/android/media_drm_bridge.h"
#include "media/base/android/media_player_manager.h"
#include "media/base/android/video_decoder_job.h"
#include "media/base/audio_timestamp_helper.h"
+#include "media/base/buffers.h"
namespace {
@@ -59,36 +63,36 @@ bool MediaSourcePlayer::IsTypeSupported(
MediaSourcePlayer::MediaSourcePlayer(
int player_id,
MediaPlayerManager* manager,
- int demuxer_client_id,
- DemuxerAndroid* demuxer)
+ scoped_ptr<DemuxerAndroid> demuxer)
: MediaPlayerAndroid(player_id, manager),
- demuxer_client_id_(demuxer_client_id),
- demuxer_(demuxer),
+ demuxer_(demuxer.Pass()),
pending_event_(NO_EVENT_PENDING),
- seek_request_id_(0),
width_(0),
height_(0),
audio_codec_(kUnknownAudioCodec),
video_codec_(kUnknownVideoCodec),
num_channels_(0),
sampling_rate_(0),
- audio_finished_(true),
- video_finished_(true),
+ reached_audio_eos_(false),
+ reached_video_eos_(false),
playing_(false),
is_audio_encrypted_(false),
is_video_encrypted_(false),
volume_(-1.0),
clock_(&default_tick_clock_),
+ next_video_data_is_iframe_(true),
+ doing_browser_seek_(false),
+ pending_seek_(false),
reconfig_audio_decoder_(false),
reconfig_video_decoder_(false),
weak_this_(this),
drm_bridge_(NULL),
is_waiting_for_key_(false) {
- demuxer_->AddDemuxerClient(demuxer_client_id_, this);
+ demuxer_->Initialize(this);
+ clock_.SetMaxTime(base::TimeDelta());
}
MediaSourcePlayer::~MediaSourcePlayer() {
- demuxer_->RemoveDemuxerClient(demuxer_client_id_);
Release();
}
@@ -107,31 +111,53 @@ void MediaSourcePlayer::SetVideoSurface(gfx::ScopedJavaSurface surface) {
// processed.
if (IsEventPending(SURFACE_CHANGE_EVENT_PENDING))
return;
+
+ // Eventual processing of surface change will take care of feeding the new
+ // video decoder initially with I-frame. See b/8950387.
SetPendingEvent(SURFACE_CHANGE_EVENT_PENDING);
- if (IsEventPending(SEEK_EVENT_PENDING)) {
- // Waiting for the seek to finish.
+
+ // If seek is already pending, processing of the pending surface change
+ // event will occur in OnDemuxerSeekDone().
+ if (IsEventPending(SEEK_EVENT_PENDING))
+ return;
+
+ // If video config change is already pending, processing of the pending
+ // surface change event will occur in OnDemuxerConfigsAvailable().
+ if (reconfig_video_decoder_ && IsEventPending(CONFIG_CHANGE_EVENT_PENDING))
return;
- }
- // Setting a new surface will require a new MediaCodec to be created.
- // Request a seek so that the new decoder will decode an I-frame first.
- // Or otherwise, the new MediaCodec might crash. See b/8950387.
- ScheduleSeekEventAndStopDecoding();
+ // Otherwise we need to trigger pending event processing now.
+ ProcessPendingEvents();
}
-void MediaSourcePlayer::ScheduleSeekEventAndStopDecoding() {
+void MediaSourcePlayer::ScheduleSeekEventAndStopDecoding(
+ const base::TimeDelta& seek_time) {
+ DVLOG(1) << __FUNCTION__ << "(" << seek_time.InSecondsF() << ")";
+ DCHECK(!IsEventPending(SEEK_EVENT_PENDING));
+
+ pending_seek_ = false;
+
+ clock_.SetTime(seek_time, seek_time);
+ if (audio_timestamp_helper_)
+ audio_timestamp_helper_->SetBaseTimestamp(seek_time);
+
if (audio_decoder_job_ && audio_decoder_job_->is_decoding())
audio_decoder_job_->StopDecode();
if (video_decoder_job_ && video_decoder_job_->is_decoding())
video_decoder_job_->StopDecode();
- if (IsEventPending(SEEK_EVENT_PENDING))
- return;
-
SetPendingEvent(SEEK_EVENT_PENDING);
ProcessPendingEvents();
}
+void MediaSourcePlayer::BrowserSeekToCurrentTime() {
+ DVLOG(1) << __FUNCTION__;
+
+ DCHECK(!IsEventPending(SEEK_EVENT_PENDING));
+ doing_browser_seek_ = true;
+ ScheduleSeekEventAndStopDecoding(GetCurrentTime());
+}
+
bool MediaSourcePlayer::Seekable() {
// If the duration TimeDelta, converted to milliseconds from microseconds,
// is >= 2^31, then the media is assumed to be unbounded and unseekable.
@@ -176,13 +202,23 @@ int MediaSourcePlayer::GetVideoHeight() {
return height_;
}
-void MediaSourcePlayer::SeekTo(base::TimeDelta timestamp) {
+void MediaSourcePlayer::SeekTo(const base::TimeDelta& timestamp) {
DVLOG(1) << __FUNCTION__ << "(" << timestamp.InSecondsF() << ")";
- clock_.SetTime(timestamp, timestamp);
- if (audio_timestamp_helper_)
- audio_timestamp_helper_->SetBaseTimestamp(timestamp);
- ScheduleSeekEventAndStopDecoding();
+ if (IsEventPending(SEEK_EVENT_PENDING)) {
+ DCHECK(doing_browser_seek_) << "SeekTo while SeekTo in progress";
+ DCHECK(!pending_seek_) << "SeekTo while SeekTo pending browser seek";
+
+ // There is a browser seek currently in progress to obtain I-frame to feed
+ // a newly constructed video decoder. Remember this real seek request so
+ // it can be initiated once OnDemuxerSeekDone() occurs for the browser seek.
+ pending_seek_ = true;
+ pending_seek_time_ = timestamp;
+ return;
+ }
+
+ doing_browser_seek_ = false;
+ ScheduleSeekEventAndStopDecoding(timestamp);
}
base::TimeDelta MediaSourcePlayer::GetCurrentTime() {
@@ -195,15 +231,39 @@ base::TimeDelta MediaSourcePlayer::GetDuration() {
void MediaSourcePlayer::Release() {
DVLOG(1) << __FUNCTION__;
+
+ // Allow pending seeks and config changes to survive this Release().
+ // If previously pending a prefetch done event, or a job was still decoding,
+ // then at end of Release() we need to ProcessPendingEvents() to process any
+ // seek or config change that was blocked by the prefetch or decode.
+ // TODO(qinmin/wolenetz): Maintain channel state to not double-request data
+ // or drop data received across Release()+Start(). See http://crbug.com/306314
+ // and http://crbug.com/304234.
+ bool process_pending_events = false;
+ process_pending_events = IsEventPending(PREFETCH_DONE_EVENT_PENDING) ||
+ (audio_decoder_job_ && audio_decoder_job_->is_decoding()) ||
+ (video_decoder_job_ && video_decoder_job_->is_decoding());
+
+ // Clear all the pending events except seeks and config changes.
+ pending_event_ &= (SEEK_EVENT_PENDING | CONFIG_CHANGE_EVENT_PENDING);
+
audio_decoder_job_.reset();
- video_decoder_job_.reset();
+ ResetVideoDecoderJob();
+
+ // Prevent job re-creation attempts in OnDemuxerConfigsAvailable()
reconfig_audio_decoder_ = false;
reconfig_video_decoder_ = false;
+
+ // Prevent player restart, including job re-creation attempts.
playing_ = false;
- pending_event_ = NO_EVENT_PENDING;
+
decoder_starvation_callback_.Cancel();
surface_ = gfx::ScopedJavaSurface();
- ReleaseMediaResourcesFromManager();
+ manager()->ReleaseMediaResources(player_id());
+ if (process_pending_events) {
+ DVLOG(1) << __FUNCTION__ << " : Resuming seek or config change processing";
+ ProcessPendingEvents();
+ }
}
void MediaSourcePlayer::SetVolume(double volume) {
@@ -258,8 +318,6 @@ void MediaSourcePlayer::StartInternal() {
return;
}
- audio_finished_ = false;
- video_finished_ = false;
SetPendingEvent(PREFETCH_REQUEST_EVENT_PENDING);
ProcessPendingEvents();
}
@@ -288,19 +346,15 @@ void MediaSourcePlayer::OnDemuxerConfigsAvailable(
height_ = configs.video_size.height();
is_video_encrypted_ = configs.is_video_encrypted;
- OnMediaMetadataChanged(duration_, width_, height_, true);
+ manager()->OnMediaMetadataChanged(
+ player_id(), duration_, width_, height_, true);
if (IsEventPending(CONFIG_CHANGE_EVENT_PENDING)) {
if (reconfig_audio_decoder_)
ConfigureAudioDecoderJob();
- // If there is a pending surface change, we can merge it with the config
- // change.
- if (reconfig_video_decoder_) {
- if (IsEventPending(SURFACE_CHANGE_EVENT_PENDING))
- ClearPendingEvent(SURFACE_CHANGE_EVENT_PENDING);
+ if (reconfig_video_decoder_)
ConfigureVideoDecoderJob();
- }
ClearPendingEvent(CONFIG_CHANGE_EVENT_PENDING);
@@ -313,10 +367,13 @@ void MediaSourcePlayer::OnDemuxerConfigsAvailable(
void MediaSourcePlayer::OnDemuxerDataAvailable(const DemuxerData& data) {
DVLOG(1) << __FUNCTION__ << "(" << data.type << ")";
DCHECK_LT(0u, data.access_units.size());
- if (data.type == DemuxerStream::AUDIO && audio_decoder_job_)
+ if (data.type == DemuxerStream::AUDIO && audio_decoder_job_) {
audio_decoder_job_->OnDataReceived(data);
- else if (data.type == DemuxerStream::VIDEO && video_decoder_job_)
- video_decoder_job_->OnDataReceived(data);
+ } else if (data.type == DemuxerStream::VIDEO) {
+ next_video_data_is_iframe_ = false;
+ if (video_decoder_job_)
+ video_decoder_job_->OnDataReceived(data);
+ }
}
void MediaSourcePlayer::OnDemuxerDurationChanged(base::TimeDelta duration) {
@@ -345,8 +402,8 @@ void MediaSourcePlayer::SetDrmBridge(MediaDrmBridge* drm_bridge) {
// TODO(qinmin): support DRM change after playback has started.
// http://crbug.com/253792.
if (GetCurrentTime() > base::TimeDelta()) {
- LOG(INFO) << "Setting DRM bridge after playback has started. "
- << "This is not well supported!";
+ VLOG(0) << "Setting DRM bridge after playback has started. "
+ << "This is not well supported!";
}
drm_bridge_ = drm_bridge;
@@ -361,15 +418,56 @@ void MediaSourcePlayer::SetDrmBridge(MediaDrmBridge* drm_bridge) {
StartInternal();
}
-void MediaSourcePlayer::OnDemuxerSeeked(unsigned seek_request_id) {
- DVLOG(1) << __FUNCTION__ << "(" << seek_request_id << ")";
- // Do nothing until the most recent seek request is processed.
- if (seek_request_id_ != seek_request_id)
- return;
+void MediaSourcePlayer::OnDemuxerSeekDone(
+ const base::TimeDelta& actual_browser_seek_time) {
+ DVLOG(1) << __FUNCTION__;
ClearPendingEvent(SEEK_EVENT_PENDING);
+ if (IsEventPending(PREFETCH_REQUEST_EVENT_PENDING))
+ ClearPendingEvent(PREFETCH_REQUEST_EVENT_PENDING);
+
+ next_video_data_is_iframe_ = true;
+
+ if (pending_seek_) {
+ DVLOG(1) << __FUNCTION__ << "processing pending seek";
+ DCHECK(doing_browser_seek_);
+ pending_seek_ = false;
+ SeekTo(pending_seek_time_);
+ return;
+ }
+
+ // It is possible that a browser seek to I-frame had to seek to a buffered
+ // I-frame later than the requested one due to data removal or GC. Update
+ // player clock to the actual seek target.
+ if (doing_browser_seek_) {
+ DCHECK(actual_browser_seek_time != kNoTimestamp());
+ // A browser seek must not jump into the past. Ideally, it seeks to the
+ // requested time, but it might jump into the future.
+ DCHECK(actual_browser_seek_time >= GetCurrentTime());
+ DVLOG(1) << __FUNCTION__ << " : setting clock to actual browser seek time: "
+ << actual_browser_seek_time.InSecondsF();
+ clock_.SetTime(actual_browser_seek_time, actual_browser_seek_time);
+ if (audio_timestamp_helper_)
+ audio_timestamp_helper_->SetBaseTimestamp(actual_browser_seek_time);
+ }
+
+ reached_audio_eos_ = false;
+ reached_video_eos_ = false;
+
+ base::TimeDelta current_time = GetCurrentTime();
+ // TODO(qinmin): Simplify the logic by using |start_presentation_timestamp_|
+ // to preroll media decoder jobs. Currently |start_presentation_timestamp_|
+ // is calculated from decoder output, while preroll relies on the access
+ // unit's timestamp. There are some differences between the two.
+ preroll_timestamp_ = current_time;
+ if (audio_decoder_job_)
+ audio_decoder_job_->BeginPrerolling(preroll_timestamp_);
+ if (video_decoder_job_)
+ video_decoder_job_->BeginPrerolling(preroll_timestamp_);
+
+ if (!doing_browser_seek_)
+ manager()->OnSeekComplete(player_id(), current_time);
- OnSeekComplete();
ProcessPendingEvents();
}
@@ -384,7 +482,7 @@ void MediaSourcePlayer::UpdateTimestamps(
}
clock_.SetMaxTime(new_max_time);
- OnTimeUpdated();
+ manager()->OnTimeUpdate(player_id(), GetCurrentTime());
}
void MediaSourcePlayer::ProcessPendingEvents() {
@@ -406,11 +504,9 @@ void MediaSourcePlayer::ProcessPendingEvents() {
}
if (IsEventPending(SEEK_EVENT_PENDING)) {
- int seek_request_id = ++seek_request_id_;
- DVLOG(1) << __FUNCTION__ << " : Handling SEEK_EVENT: " << seek_request_id;
+ DVLOG(1) << __FUNCTION__ << " : Handling SEEK_EVENT";
ClearDecodingData();
- demuxer_->RequestDemuxerSeek(
- demuxer_client_id_, GetCurrentTime(), seek_request_id);
+ demuxer_->RequestDemuxerSeek(GetCurrentTime(), doing_browser_seek_);
return;
}
@@ -418,32 +514,47 @@ void MediaSourcePlayer::ProcessPendingEvents() {
if (IsEventPending(CONFIG_CHANGE_EVENT_PENDING)) {
DVLOG(1) << __FUNCTION__ << " : Handling CONFIG_CHANGE_EVENT.";
DCHECK(reconfig_audio_decoder_ || reconfig_video_decoder_);
- demuxer_->RequestDemuxerConfigs(demuxer_client_id_);
+ demuxer_->RequestDemuxerConfigs();
return;
}
if (IsEventPending(SURFACE_CHANGE_EVENT_PENDING)) {
DVLOG(1) << __FUNCTION__ << " : Handling SURFACE_CHANGE_EVENT.";
- video_decoder_job_.reset();
+ // Setting a new surface will require a new MediaCodec to be created.
+ ResetVideoDecoderJob();
ConfigureVideoDecoderJob();
- ClearPendingEvent(SURFACE_CHANGE_EVENT_PENDING);
+
+ // Return early if we can't successfully configure a new video decoder job
+ // yet, except continue processing other pending events if |surface_| is
+ // empty.
+ if (HasVideo() && !video_decoder_job_ && !surface_.IsEmpty())
+ return;
}
if (IsEventPending(PREFETCH_REQUEST_EVENT_PENDING)) {
DVLOG(1) << __FUNCTION__ << " : Handling PREFETCH_REQUEST_EVENT.";
- int count = (audio_decoder_job_ ? 1 : 0) + (video_decoder_job_ ? 1 : 0);
+ DCHECK(audio_decoder_job_ || AudioFinished());
+ DCHECK(video_decoder_job_ || VideoFinished());
+
+ int count = (AudioFinished() ? 0 : 1) + (VideoFinished() ? 0 : 1);
+
+ // It is possible that all streams have finished decode, yet starvation
+ // occurred during the last stream's EOS decode. In this case, prefetch is a
+ // no-op.
+ ClearPendingEvent(PREFETCH_REQUEST_EVENT_PENDING);
+ if (count == 0)
+ return;
+ SetPendingEvent(PREFETCH_DONE_EVENT_PENDING);
base::Closure barrier = BarrierClosure(count, base::Bind(
&MediaSourcePlayer::OnPrefetchDone, weak_this_.GetWeakPtr()));
- if (audio_decoder_job_)
+ if (!AudioFinished())
audio_decoder_job_->Prefetch(barrier);
- if (video_decoder_job_)
+ if (!VideoFinished())
video_decoder_job_->Prefetch(barrier);
- SetPendingEvent(PREFETCH_DONE_EVENT_PENDING);
- ClearPendingEvent(PREFETCH_REQUEST_EVENT_PENDING);
return;
}
@@ -459,7 +570,25 @@ void MediaSourcePlayer::MediaDecoderCallback(
bool is_audio, MediaCodecStatus status,
const base::TimeDelta& presentation_timestamp, size_t audio_output_bytes) {
DVLOG(1) << __FUNCTION__ << ": " << is_audio << ", " << status;
- DCHECK(!is_waiting_for_key_);
+
+ // TODO(xhwang): Drop IntToString() when http://crbug.com/303899 is fixed.
+ if (is_audio) {
+ TRACE_EVENT_ASYNC_END1("media",
+ "MediaSourcePlayer::DecodeMoreAudio",
+ audio_decoder_job_.get(),
+ "MediaCodecStatus",
+ base::IntToString(status));
+ } else {
+ TRACE_EVENT_ASYNC_END1("media",
+ "MediaSourcePlayer::DecodeMoreVideo",
+ video_decoder_job_.get(),
+ "MediaCodecStatus",
+ base::IntToString(status));
+ }
+
+ // Let tests hook the completion of this decode cycle.
+ if (!decode_callback_for_testing_.is_null())
+ base::ResetAndReturn(&decode_callback_for_testing_).Run();
bool is_clock_manager = is_audio || !HasAudio();
@@ -467,23 +596,37 @@ void MediaSourcePlayer::MediaDecoderCallback(
decoder_starvation_callback_.Cancel();
if (status == MEDIA_CODEC_ERROR) {
+ DVLOG(1) << __FUNCTION__ << " : decode error";
Release();
- OnMediaError(MEDIA_ERROR_DECODE);
+ manager()->OnError(player_id(), MEDIA_ERROR_DECODE);
return;
}
- if (pending_event_ != NO_EVENT_PENDING) {
+ DCHECK(!IsEventPending(PREFETCH_DONE_EVENT_PENDING));
+
+ // Let |SEEK_EVENT_PENDING| (the highest priority event outside of
+ // |PREFETCH_DONE_EVENT_PENDING|) preempt output EOS detection here. Process
+ // any other pending events only after handling EOS detection.
+ if (IsEventPending(SEEK_EVENT_PENDING)) {
ProcessPendingEvents();
return;
}
- if (status == MEDIA_CODEC_OUTPUT_END_OF_STREAM) {
+ if (status == MEDIA_CODEC_OUTPUT_END_OF_STREAM)
PlaybackCompleted(is_audio);
+
+ if (pending_event_ != NO_EVENT_PENDING) {
+ ProcessPendingEvents();
return;
}
- if (status == MEDIA_CODEC_OK && is_clock_manager)
+ if (status == MEDIA_CODEC_OUTPUT_END_OF_STREAM)
+ return;
+
+ if (status == MEDIA_CODEC_OK && is_clock_manager &&
+ presentation_timestamp != kNoTimestamp()) {
UpdateTimestamps(presentation_timestamp, audio_output_bytes);
+ }
if (!playing_) {
if (is_clock_manager)
@@ -502,8 +645,15 @@ void MediaSourcePlayer::MediaDecoderCallback(
if (status == MEDIA_CODEC_STOPPED)
return;
- if (status == MEDIA_CODEC_OK && is_clock_manager)
- StartStarvationCallback(presentation_timestamp);
+ if (is_clock_manager) {
+ // If we have a valid timestamp, start the starvation callback. Otherwise,
+ // reset the |start_time_ticks_| so that the next frame will not suffer
+ // from the decoding delay caused by the current frame.
+ if (presentation_timestamp != kNoTimestamp())
+ StartStarvationCallback(presentation_timestamp);
+ else
+ start_time_ticks_ = base::TimeTicks::Now();
+ }
if (is_audio) {
DecodeMoreAudio();
@@ -516,17 +666,29 @@ void MediaSourcePlayer::MediaDecoderCallback(
void MediaSourcePlayer::DecodeMoreAudio() {
DVLOG(1) << __FUNCTION__;
DCHECK(!audio_decoder_job_->is_decoding());
+ DCHECK(!AudioFinished());
if (audio_decoder_job_->Decode(
start_time_ticks_, start_presentation_timestamp_, base::Bind(
&MediaSourcePlayer::MediaDecoderCallback,
weak_this_.GetWeakPtr(), true))) {
+ TRACE_EVENT_ASYNC_BEGIN0("media", "MediaSourcePlayer::DecodeMoreAudio",
+ audio_decoder_job_.get());
return;
}
// Failed to start the next decode.
// Wait for demuxer ready message.
+ DCHECK(!reconfig_audio_decoder_);
reconfig_audio_decoder_ = true;
+
+ // Config change may have just been detected on the other stream. If so,
+ // don't send a duplicate demuxer config request.
+ if (IsEventPending(CONFIG_CHANGE_EVENT_PENDING)) {
+ DCHECK(reconfig_video_decoder_);
+ return;
+ }
+
SetPendingEvent(CONFIG_CHANGE_EVENT_PENDING);
ProcessPendingEvents();
}
@@ -534,17 +696,34 @@ void MediaSourcePlayer::DecodeMoreAudio() {
void MediaSourcePlayer::DecodeMoreVideo() {
DVLOG(1) << __FUNCTION__;
DCHECK(!video_decoder_job_->is_decoding());
+ DCHECK(!VideoFinished());
if (video_decoder_job_->Decode(
start_time_ticks_, start_presentation_timestamp_, base::Bind(
&MediaSourcePlayer::MediaDecoderCallback,
weak_this_.GetWeakPtr(), false))) {
+ TRACE_EVENT_ASYNC_BEGIN0("media", "MediaSourcePlayer::DecodeMoreVideo",
+ video_decoder_job_.get());
return;
}
// Failed to start the next decode.
// Wait for demuxer ready message.
+
+ // After this detection of video config change, next video data received
+ // will begin with I-frame.
+ next_video_data_is_iframe_ = true;
+
+ DCHECK(!reconfig_video_decoder_);
reconfig_video_decoder_ = true;
+
+ // Config change may have just been detected on the other stream. If so,
+ // don't send a duplicate demuxer config request.
+ if (IsEventPending(CONFIG_CHANGE_EVENT_PENDING)) {
+ DCHECK(reconfig_audio_decoder_);
+ return;
+ }
+
SetPendingEvent(CONFIG_CHANGE_EVENT_PENDING);
ProcessPendingEvents();
}
@@ -552,15 +731,15 @@ void MediaSourcePlayer::DecodeMoreVideo() {
void MediaSourcePlayer::PlaybackCompleted(bool is_audio) {
DVLOG(1) << __FUNCTION__ << "(" << is_audio << ")";
if (is_audio)
- audio_finished_ = true;
+ reached_audio_eos_ = true;
else
- video_finished_ = true;
+ reached_video_eos_ = true;
- if ((!HasAudio() || audio_finished_) && (!HasVideo() || video_finished_)) {
+ if (AudioFinished() && VideoFinished()) {
playing_ = false;
clock_.Pause();
start_time_ticks_ = base::TimeTicks();
- OnPlaybackComplete();
+ manager()->OnPlaybackComplete(player_id());
}
}
@@ -581,6 +760,14 @@ bool MediaSourcePlayer::HasAudio() {
return kUnknownAudioCodec != audio_codec_;
}
+bool MediaSourcePlayer::AudioFinished() {
+ return reached_audio_eos_ || !HasAudio();
+}
+
+bool MediaSourcePlayer::VideoFinished() {
+ return reached_video_eos_ || !HasVideo();
+}
+
void MediaSourcePlayer::ConfigureAudioDecoderJob() {
if (!HasAudio()) {
audio_decoder_job_.reset();
@@ -597,38 +784,71 @@ void MediaSourcePlayer::ConfigureAudioDecoderJob() {
DCHECK(!audio_decoder_job_ || !audio_decoder_job_->is_decoding());
+ DVLOG(1) << __FUNCTION__ << " : creating new audio decoder job";
+
audio_decoder_job_.reset(AudioDecoderJob::Create(
audio_codec_, sampling_rate_, num_channels_, &audio_extra_data_[0],
audio_extra_data_.size(), media_crypto.obj(),
base::Bind(&DemuxerAndroid::RequestDemuxerData,
- base::Unretained(demuxer_), demuxer_client_id_,
- DemuxerStream::AUDIO)));
+ base::Unretained(demuxer_.get()), DemuxerStream::AUDIO)));
if (audio_decoder_job_) {
SetVolumeInternal();
+ audio_decoder_job_->BeginPrerolling(preroll_timestamp_);
reconfig_audio_decoder_ = false;
}
}
+void MediaSourcePlayer::ResetVideoDecoderJob() {
+ video_decoder_job_.reset();
+
+ // Any eventual video decoder job re-creation will use the current |surface_|.
+ if (IsEventPending(SURFACE_CHANGE_EVENT_PENDING))
+ ClearPendingEvent(SURFACE_CHANGE_EVENT_PENDING);
+}
+
void MediaSourcePlayer::ConfigureVideoDecoderJob() {
if (!HasVideo() || surface_.IsEmpty()) {
- video_decoder_job_.reset();
+ ResetVideoDecoderJob();
return;
}
- // Create video decoder job only if config changes.
- if (video_decoder_job_ && !reconfig_video_decoder_)
+ // Create video decoder job only if config changes or we don't have a job.
+ if (video_decoder_job_ && !reconfig_video_decoder_) {
+ DCHECK(!IsEventPending(SURFACE_CHANGE_EVENT_PENDING));
return;
+ }
+
+ DCHECK(!video_decoder_job_ || !video_decoder_job_->is_decoding());
+
+ if (reconfig_video_decoder_) {
+ // No hack browser seek should be required. I-Frame must be next.
+ DCHECK(next_video_data_is_iframe_) << "Received video data between "
+ << "detecting video config change and reconfiguring video decoder";
+ }
+
+ // If uncertain that video I-frame data is next and there is no seek already
+ // in process, request browser demuxer seek so the new decoder will decode
+ // an I-frame first. Otherwise, the new MediaCodec might crash. See b/8950387.
+ // Eventual OnDemuxerSeekDone() will trigger ProcessPendingEvents() and
+ // continue from here.
+ // TODO(wolenetz): Instead of doing hack browser seek, replay cached data
+ // since last keyframe. See http://crbug.com/304234.
+ if (!next_video_data_is_iframe_ && !IsEventPending(SEEK_EVENT_PENDING)) {
+ BrowserSeekToCurrentTime();
+ return;
+ }
+
+ // Release the old VideoDecoderJob first so the surface can get released.
+ // Android does not allow 2 MediaCodec instances use the same surface.
+ ResetVideoDecoderJob();
base::android::ScopedJavaLocalRef<jobject> media_crypto = GetMediaCrypto();
if (is_video_encrypted_ && media_crypto.is_null())
return;
- DCHECK(!video_decoder_job_ || !video_decoder_job_->is_decoding());
+ DVLOG(1) << __FUNCTION__ << " : creating new video decoder job";
- // Release the old VideoDecoderJob first so the surface can get released.
- // Android does not allow 2 MediaCodec instances use the same surface.
- video_decoder_job_.reset();
// Create the new VideoDecoderJob.
bool is_secure = IsProtectedSurfaceRequired();
video_decoder_job_.reset(
@@ -638,16 +858,19 @@ void MediaSourcePlayer::ConfigureVideoDecoderJob() {
surface_.j_surface().obj(),
media_crypto.obj(),
base::Bind(&DemuxerAndroid::RequestDemuxerData,
- base::Unretained(demuxer_),
- demuxer_client_id_,
+ base::Unretained(demuxer_.get()),
DemuxerStream::VIDEO)));
- if (video_decoder_job_)
- reconfig_video_decoder_ = false;
+ if (!video_decoder_job_)
+ return;
+
+ video_decoder_job_->BeginPrerolling(preroll_timestamp_);
+ reconfig_video_decoder_ = false;
// Inform the fullscreen view the player is ready.
// TODO(qinmin): refactor MediaPlayerBridge so that we have a better way
// to inform ContentVideoView.
- OnMediaMetadataChanged(duration_, width_, height_, true);
+ manager()->OnMediaMetadataChanged(
+ player_id(), duration_, width_, height_, true);
}
void MediaSourcePlayer::OnDecoderStarved() {
@@ -702,7 +925,17 @@ void MediaSourcePlayer::OnPrefetchDone() {
DVLOG(1) << __FUNCTION__;
DCHECK(!audio_decoder_job_ || !audio_decoder_job_->is_decoding());
DCHECK(!video_decoder_job_ || !video_decoder_job_->is_decoding());
- DCHECK(IsEventPending(PREFETCH_DONE_EVENT_PENDING));
+
+ // A previously posted OnPrefetchDone() could race against a Release(). If
+ // Release() won the race, we should no longer have decoder jobs.
+ // TODO(qinmin/wolenetz): Maintain channel state to not double-request data
+ // or drop data received across Release()+Start(). See http://crbug.com/306314
+ // and http://crbug.com/304234.
+ if (!IsEventPending(PREFETCH_DONE_EVENT_PENDING)) {
+ DVLOG(1) << __FUNCTION__ << " : aborting";
+ DCHECK(!audio_decoder_job_ && !video_decoder_job_);
+ return;
+ }
ClearPendingEvent(PREFETCH_DONE_EVENT_PENDING);
@@ -716,9 +949,10 @@ void MediaSourcePlayer::OnPrefetchDone() {
if (!clock_.IsPlaying())
clock_.Play();
- if (audio_decoder_job_)
+ if (!AudioFinished())
DecodeMoreAudio();
- if (video_decoder_job_)
+
+ if (!VideoFinished())
DecodeMoreVideo();
}
diff --git a/chromium/media/base/android/media_source_player.h b/chromium/media/base/android/media_source_player.h
index 1708e39a260..ef822d49b28 100644
--- a/chromium/media/base/android/media_source_player.h
+++ b/chromium/media/base/android/media_source_player.h
@@ -33,17 +33,14 @@ class VideoDecoderJob;
// This class handles media source extensions on Android. It uses Android
// MediaCodec to decode audio and video streams in two separate threads.
-// IPC is being used to send data from the render process to this object.
-// TODO(qinmin): use shared memory to send data between processes.
class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid,
public DemuxerAndroidClient {
public:
- // Constructs a player with the given IDs. |manager| and |demuxer| must
- // outlive the lifetime of this object.
+ // Constructs a player with the given ID and demuxer. |manager| must outlive
+ // the lifetime of this object.
MediaSourcePlayer(int player_id,
MediaPlayerManager* manager,
- int demuxer_client_id,
- DemuxerAndroid* demuxer);
+ scoped_ptr<DemuxerAndroid> demuxer);
virtual ~MediaSourcePlayer();
static bool IsTypeSupported(const std::vector<uint8>& scheme_uuid,
@@ -55,7 +52,7 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid,
virtual void SetVideoSurface(gfx::ScopedJavaSurface surface) OVERRIDE;
virtual void Start() OVERRIDE;
virtual void Pause(bool is_media_related_action ALLOW_UNUSED) OVERRIDE;
- virtual void SeekTo(base::TimeDelta timestamp) OVERRIDE;
+ virtual void SeekTo(const base::TimeDelta& timestamp) OVERRIDE;
virtual void Release() OVERRIDE;
virtual void SetVolume(double volume) OVERRIDE;
virtual int GetVideoWidth() OVERRIDE;
@@ -73,7 +70,8 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid,
// DemuxerAndroidClient implementation.
virtual void OnDemuxerConfigsAvailable(const DemuxerConfigs& params) OVERRIDE;
virtual void OnDemuxerDataAvailable(const DemuxerData& params) OVERRIDE;
- virtual void OnDemuxerSeeked(unsigned seek_request_id) OVERRIDE;
+ virtual void OnDemuxerSeekDone(
+ const base::TimeDelta& actual_browser_seek_time) OVERRIDE;
virtual void OnDemuxerDurationChanged(base::TimeDelta duration) OVERRIDE;
private:
@@ -99,17 +97,21 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid,
// Callback to notify that MediaCrypto is ready in |drm_bridge_|.
void OnMediaCryptoReady();
- // Handle pending events when all the decoder jobs finished.
+ // Handle pending events if all the decoder jobs are not currently decoding.
void ProcessPendingEvents();
- // Helper method to configure the decoder jobs.
+ // Helper method to clear any pending |SURFACE_CHANGE_EVENT_PENDING|
+ // and reset |video_decoder_job_| to null.
+ void ResetVideoDecoderJob();
+
+ // Helper methods to configure the decoder jobs.
void ConfigureVideoDecoderJob();
void ConfigureAudioDecoderJob();
// Flush the decoders and clean up all the data needs to be decoded.
void ClearDecodingData();
- // Called to decoder more data.
+ // Called to decode more data.
void DecodeMoreAudio();
void DecodeMoreVideo();
@@ -117,6 +119,11 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid,
bool HasVideo();
bool HasAudio();
+ // Functions that check whether audio/video stream has reached end of output
+ // or are not present in player configuration.
+ bool AudioFinished();
+ bool VideoFinished();
+
// Determine seekability based on duration.
bool Seekable();
@@ -130,8 +137,18 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid,
void StartStarvationCallback(const base::TimeDelta& presentation_timestamp);
// Schedules a seek event in |pending_events_| and calls StopDecode() on all
- // the MediaDecoderJobs.
- void ScheduleSeekEventAndStopDecoding();
+ // the MediaDecoderJobs. Sets clock to |seek_time|, and resets
+ // |pending_seek_|. There must not already be a seek event in
+ // |pending_events_|.
+ void ScheduleSeekEventAndStopDecoding(const base::TimeDelta& seek_time);
+
+ // Schedules a browser seek event. We must not currently be processing any
+ // seek. Note that there is possibility that browser seek of renderer demuxer
+ // may unexpectedly stall due to lack of buffered data at or after the browser
+ // seek time.
+ // TODO(wolenetz): Instead of doing hack browser seek, replay cached data
+ // since last keyframe. See http://crbug.com/304234.
+ void BrowserSeekToCurrentTime();
// Helper function to set the volume.
void SetVolumeInternal();
@@ -146,6 +163,17 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid,
// resync with audio and starts decoding.
void OnPrefetchDone();
+ // Test-only method to setup hook for the completion of the next decode cycle.
+ // This callback state is cleared when it is next run.
+ // Prevent usage creep by only calling this from the
+ // ReleaseWithOnPrefetchDoneAlreadyPosted MediaSourcePlayerTest.
+ void set_decode_callback_for_testing(const base::Closure& test_decode_cb) {
+ decode_callback_for_testing_ = test_decode_cb;
+ }
+
+ // TODO(qinmin/wolenetz): Reorder these based on their priority from
+ // ProcessPendingEvents(). Release() and other routines are dependent upon
+ // priority consistency.
enum PendingEventFlags {
NO_EVENT_PENDING = 0,
SEEK_EVENT_PENDING = 1 << 0,
@@ -160,15 +188,11 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid,
void SetPendingEvent(PendingEventFlags event);
void ClearPendingEvent(PendingEventFlags event);
- int demuxer_client_id_;
- DemuxerAndroid* demuxer_;
+ scoped_ptr<DemuxerAndroid> demuxer_;
// Pending event that the player needs to do.
unsigned pending_event_;
- // ID to keep track of whether all the seek requests are acked.
- unsigned seek_request_id_;
-
// Stats about the media.
base::TimeDelta duration_;
int width_;
@@ -179,8 +203,8 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid,
int sampling_rate_;
// TODO(xhwang/qinmin): Add |video_extra_data_|.
std::vector<uint8> audio_extra_data_;
- bool audio_finished_;
- bool video_finished_;
+ bool reached_audio_eos_;
+ bool reached_video_eos_;
bool playing_;
bool is_audio_encrypted_;
bool is_video_encrypted_;
@@ -204,6 +228,26 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid,
// The surface object currently owned by the player.
gfx::ScopedJavaSurface surface_;
+ // Track whether or not the player has received any video data since the most
+ // recent of player construction, end of last seek, or receiving and
+ // detecting a |kConfigChanged| access unit from the demuxer.
+ // If no such video data has been received, the next video data begins with
+ // an I-frame. Otherwise, we have no such guarantee.
+ bool next_video_data_is_iframe_;
+
+ // Flag that is true if doing a hack browser seek or false if doing a
+ // regular seek. Only valid when |SEEK_EVENT_PENDING| is pending.
+ // TODO(wolenetz): Instead of doing hack browser seek, replay cached data
+ // since last keyframe. See http://crbug.com/304234.
+ bool doing_browser_seek_;
+
+ // If already doing a browser seek when a regular seek request arrives,
+ // these fields remember the regular seek so OnDemuxerSeekDone() can trigger
+ // it when the browser seek is done. These are only valid when
+ // |SEEK_EVENT_PENDING| is pending.
+ bool pending_seek_;
+ base::TimeDelta pending_seek_time_;
+
// Decoder jobs.
scoped_ptr<AudioDecoderJob, MediaDecoderJob::Deleter> audio_decoder_job_;
scoped_ptr<VideoDecoderJob, MediaDecoderJob::Deleter> video_decoder_job_;
@@ -211,6 +255,10 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid,
bool reconfig_audio_decoder_;
bool reconfig_video_decoder_;
+ // Track the most recent preroll target. Decoder re-creation needs this to
+ // resume any in-progress preroll.
+ base::TimeDelta preroll_timestamp_;
+
// A cancelable task that is posted when the audio decoder starts requesting
// new data. This callback runs if no data arrives before the timeout period
// elapses.
@@ -229,6 +277,9 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid,
// try to start playback again.
bool is_waiting_for_key_;
+ // Test-only callback for hooking the completion of the next decode cycle.
+ base::Closure decode_callback_for_testing_;
+
friend class MediaSourcePlayerTest;
DISALLOW_COPY_AND_ASSIGN(MediaSourcePlayer);
};
diff --git a/chromium/media/base/android/media_source_player_unittest.cc b/chromium/media/base/android/media_source_player_unittest.cc
index edf7016010c..7970acc0050 100644
--- a/chromium/media/base/android/media_source_player_unittest.cc
+++ b/chromium/media/base/android/media_source_player_unittest.cc
@@ -11,6 +11,7 @@
#include "media/base/android/media_drm_bridge.h"
#include "media/base/android/media_player_manager.h"
#include "media/base/android/media_source_player.h"
+#include "media/base/bind_to_loop.h"
#include "media/base/decoder_buffer.h"
#include "media/base/test_data_util.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -18,6 +19,15 @@
namespace media {
+// Helper macro to skip the test if MediaCodecBridge isn't available.
+#define SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE() \
+ do { \
+ if (!MediaCodecBridge::IsAvailable()) { \
+ VLOG(0) << "Could not run test - not supported on device."; \
+ return; \
+ } \
+ } while (0)
+
static const int kDefaultDurationInMs = 10000;
static const char kAudioMp4[] = "audio/mp4";
@@ -25,11 +35,15 @@ static const char kVideoMp4[] = "video/mp4";
static const char kAudioWebM[] = "audio/webm";
static const char kVideoWebM[] = "video/webm";
+// TODO(wolenetz/qinmin): Simplify tests with more effective mock usage, and
+// fix flaky pointer-based MDJ inequality testing. See http://crbug.com/327839.
+
// Mock of MediaPlayerManager for testing purpose
class MockMediaPlayerManager : public MediaPlayerManager {
public:
explicit MockMediaPlayerManager(base::MessageLoop* message_loop)
- : message_loop_(message_loop) {}
+ : message_loop_(message_loop),
+ playback_completed_(false) {}
virtual ~MockMediaPlayerManager() {}
// MediaPlayerManager implementation.
@@ -44,36 +58,45 @@ class MockMediaPlayerManager : public MediaPlayerManager {
int player_id, base::TimeDelta duration, int width, int height,
bool success) OVERRIDE {}
virtual void OnPlaybackComplete(int player_id) OVERRIDE {
+ playback_completed_ = true;
if (message_loop_->is_running())
message_loop_->Quit();
}
virtual void OnMediaInterrupted(int player_id) OVERRIDE {}
virtual void OnBufferingUpdate(int player_id, int percentage) OVERRIDE {}
virtual void OnSeekComplete(int player_id,
- base::TimeDelta current_time) OVERRIDE {}
+ const base::TimeDelta& current_time) OVERRIDE {}
virtual void OnError(int player_id, int error) OVERRIDE {}
virtual void OnVideoSizeChanged(int player_id, int width,
int height) OVERRIDE {}
virtual MediaPlayerAndroid* GetFullscreenPlayer() OVERRIDE { return NULL; }
virtual MediaPlayerAndroid* GetPlayer(int player_id) OVERRIDE { return NULL; }
virtual void DestroyAllMediaPlayers() OVERRIDE {}
- virtual media::MediaDrmBridge* GetDrmBridge(int media_keys_id) OVERRIDE {
+ virtual MediaDrmBridge* GetDrmBridge(int media_keys_id) OVERRIDE {
return NULL;
}
virtual void OnProtectedSurfaceRequested(int player_id) OVERRIDE {}
- virtual void OnKeyAdded(int key_id,
- const std::string& session_id) OVERRIDE {}
- virtual void OnKeyError(int key_id,
- const std::string& session_id,
- media::MediaKeys::KeyError error_code,
- int system_code) OVERRIDE {}
- virtual void OnKeyMessage(int key_id,
- const std::string& session_id,
- const std::vector<uint8>& message,
- const std::string& destination_url) OVERRIDE {}
+ virtual void OnSessionCreated(int media_keys_id,
+ uint32 session_id,
+ const std::string& web_session_id) OVERRIDE {}
+ virtual void OnSessionMessage(int media_keys_id,
+ uint32 session_id,
+ const std::vector<uint8>& message,
+ const std::string& destination_url) OVERRIDE {}
+ virtual void OnSessionReady(int media_keys_id, uint32 session_id) OVERRIDE {}
+ virtual void OnSessionClosed(int media_keys_id, uint32 session_id) OVERRIDE {}
+ virtual void OnSessionError(int media_keys_id,
+ uint32 session_id,
+ media::MediaKeys::KeyError error_code,
+ int system_code) OVERRIDE {}
+
+ bool playback_completed() const {
+ return playback_completed_;
+ }
private:
base::MessageLoop* message_loop_;
+ bool playback_completed_;
DISALLOW_COPY_AND_ASSIGN(MockMediaPlayerManager);
};
@@ -82,35 +105,47 @@ class MockDemuxerAndroid : public DemuxerAndroid {
public:
explicit MockDemuxerAndroid(base::MessageLoop* message_loop)
: message_loop_(message_loop),
- num_requests_(0),
- last_seek_request_id_(0) {}
+ num_data_requests_(0),
+ num_seek_requests_(0),
+ num_browser_seek_requests_(0),
+ num_config_requests_(0) {}
virtual ~MockDemuxerAndroid() {}
- virtual void AddDemuxerClient(int demuxer_client_id,
- DemuxerAndroidClient* client) OVERRIDE {}
- virtual void RemoveDemuxerClient(int demuxer_client_id) OVERRIDE {}
- virtual void RequestDemuxerConfigs(int demuxer_client_id) OVERRIDE {}
- virtual void RequestDemuxerData(int demuxer_client_id,
- media::DemuxerStream::Type type) OVERRIDE {
- num_requests_++;
+ virtual void Initialize(DemuxerAndroidClient* client) OVERRIDE {}
+ virtual void RequestDemuxerConfigs() OVERRIDE {
+ num_config_requests_++;
+ }
+ virtual void RequestDemuxerData(DemuxerStream::Type type) OVERRIDE {
+ num_data_requests_++;
if (message_loop_->is_running())
message_loop_->Quit();
}
- virtual void RequestDemuxerSeek(int demuxer_client_id,
- base::TimeDelta time_to_seek,
- unsigned seek_request_id) OVERRIDE {
- last_seek_request_id_ = seek_request_id;
+ virtual void RequestDemuxerSeek(const base::TimeDelta& time_to_seek,
+ bool is_browser_seek) OVERRIDE {
+ num_seek_requests_++;
+ if (is_browser_seek)
+ num_browser_seek_requests_++;
}
- int num_requests() const { return num_requests_; }
- unsigned last_seek_request_id() const { return last_seek_request_id_; }
+ int num_data_requests() const { return num_data_requests_; }
+ int num_seek_requests() const { return num_seek_requests_; }
+ int num_browser_seek_requests() const { return num_browser_seek_requests_; }
+ int num_config_requests() const { return num_config_requests_; }
private:
base::MessageLoop* message_loop_;
- // The number of request this object has requested for decoding data.
- int num_requests_;
- unsigned last_seek_request_id_;
+ // The number of encoded data requests this object has seen.
+ int num_data_requests_;
+
+ // The number of regular and browser seek requests this object has seen.
+ int num_seek_requests_;
+
+ // The number of browser seek requests this object has seen.
+ int num_browser_seek_requests_;
+
+ // The number of demuxer config requests this object has seen.
+ int num_config_requests_;
DISALLOW_COPY_AND_ASSIGN(MockDemuxerAndroid);
};
@@ -119,8 +154,10 @@ class MediaSourcePlayerTest : public testing::Test {
public:
MediaSourcePlayerTest()
: manager_(&message_loop_),
- demuxer_(&message_loop_),
- player_(0, &manager_, 0, &demuxer_) {}
+ demuxer_(new MockDemuxerAndroid(&message_loop_)),
+ player_(0, &manager_, scoped_ptr<DemuxerAndroid>(demuxer_)),
+ decoder_callback_hook_executed_(false),
+ surface_texture_a_is_next_(true) {}
virtual ~MediaSourcePlayerTest() {}
protected:
@@ -134,50 +171,180 @@ class MediaSourcePlayerTest : public testing::Test {
player_.video_decoder_job_.get());
}
- // Starts an audio decoder job.
- void StartAudioDecoderJob() {
+ // Get the per-job prerolling status from the MediaSourcePlayer's job matching
+ // |is_audio|. Caller must guard against NPE if the player's job is NULL.
+ bool IsPrerolling(bool is_audio) {
+ return GetMediaDecoderJob(is_audio)->prerolling();
+ }
+
+ // Get the preroll timestamp from the MediaSourcePlayer.
+ base::TimeDelta GetPrerollTimestamp() {
+ return player_.preroll_timestamp_;
+ }
+
+ // Simulate player has reached starvation timeout.
+ void TriggerPlayerStarvation() {
+ player_.decoder_starvation_callback_.Cancel();
+ player_.OnDecoderStarved();
+ }
+
+ // Release() the player.
+ void ReleasePlayer() {
+ EXPECT_TRUE(player_.IsPlaying());
+ player_.Release();
+ EXPECT_FALSE(player_.IsPlaying());
+ EXPECT_FALSE(GetMediaDecoderJob(true));
+ EXPECT_FALSE(GetMediaDecoderJob(false));
+ }
+
+ // Upon the next successful decode callback, post a task to call Release()
+ // on the |player_|. TEST_F's do not have access to the private player
+ // members, hence this helper method.
+ // Prevent usage creep of MSP::set_decode_callback_for_testing() by
+ // only using it for the ReleaseWithOnPrefetchDoneAlreadyPosted test.
+ void OnNextTestDecodeCallbackPostTaskToReleasePlayer() {
+ player_.set_decode_callback_for_testing(media::BindToLoop(
+ message_loop_.message_loop_proxy(),
+ base::Bind(
+ &MediaSourcePlayerTest::ReleaseWithPendingPrefetchDoneVerification,
+ base::Unretained(this))));
+ }
+
+ // Asynch test callback posted upon decode completion to verify that a pending
+ // prefetch done event is cleared across |player_|'s Release(). This helps
+ // ensure the ReleaseWithOnPrefetchDoneAlreadyPosted test scenario is met.
+ void ReleaseWithPendingPrefetchDoneVerification() {
+ EXPECT_TRUE(player_.IsEventPending(player_.PREFETCH_DONE_EVENT_PENDING));
+ ReleasePlayer();
+ EXPECT_FALSE(player_.IsEventPending(player_.PREFETCH_DONE_EVENT_PENDING));
+ EXPECT_FALSE(decoder_callback_hook_executed_);
+ decoder_callback_hook_executed_ = true;
+ }
+
+ // Inspect internal pending_event_ state of |player_|. This is for infrequent
+ // use by tests, only where required.
+ bool IsPendingSurfaceChange() {
+ return player_.IsEventPending(player_.SURFACE_CHANGE_EVENT_PENDING);
+ }
+
+ DemuxerConfigs CreateAudioDemuxerConfigs(AudioCodec audio_codec) {
DemuxerConfigs configs;
- configs.audio_codec = kCodecVorbis;
+ configs.audio_codec = audio_codec;
configs.audio_channels = 2;
- configs.audio_sampling_rate = 44100;
configs.is_audio_encrypted = false;
configs.duration_ms = kDefaultDurationInMs;
- scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile("vorbis-extradata");
+
+ if (audio_codec == kCodecVorbis) {
+ configs.audio_sampling_rate = 44100;
+ scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile(
+ "vorbis-extradata");
+ configs.audio_extra_data = std::vector<uint8>(
+ buffer->data(),
+ buffer->data() + buffer->data_size());
+ return configs;
+ }
+
+ // Other codecs are not yet supported by this helper.
+ EXPECT_EQ(audio_codec, kCodecAAC);
+
+ configs.audio_sampling_rate = 48000;
+ uint8 aac_extra_data[] = { 0x13, 0x10 };
configs.audio_extra_data = std::vector<uint8>(
- buffer->data(),
- buffer->data() + buffer->data_size());
- Start(configs);
+ aac_extra_data,
+ aac_extra_data + 2);
+ return configs;
}
- void StartVideoDecoderJob() {
+ DemuxerConfigs CreateVideoDemuxerConfigs() {
DemuxerConfigs configs;
configs.video_codec = kCodecVP8;
configs.video_size = gfx::Size(320, 240);
configs.is_video_encrypted = false;
configs.duration_ms = kDefaultDurationInMs;
- Start(configs);
+ return configs;
}
- // Starts decoding the data.
- void Start(const DemuxerConfigs& configs) {
+ DemuxerConfigs CreateAudioVideoDemuxerConfigs() {
+ DemuxerConfigs configs = CreateAudioDemuxerConfigs(kCodecVorbis);
+ configs.video_codec = kCodecVP8;
+ configs.video_size = gfx::Size(320, 240);
+ configs.is_video_encrypted = false;
+ return configs;
+ }
+
+ DemuxerConfigs CreateDemuxerConfigs(bool have_audio, bool have_video) {
+ DCHECK(have_audio || have_video);
+
+ if (have_audio && !have_video)
+ return CreateAudioDemuxerConfigs(kCodecVorbis);
+
+ if (have_video && !have_audio)
+ return CreateVideoDemuxerConfigs();
+
+ return CreateAudioVideoDemuxerConfigs();
+ }
+
+ // Starts an audio decoder job. Verifies player behavior relative to
+ // |expect_player_requests_data|.
+ void StartAudioDecoderJob(bool expect_player_requests_data) {
+ Start(CreateAudioDemuxerConfigs(kCodecVorbis), expect_player_requests_data);
+ }
+
+ // Starts a video decoder job. Verifies player behavior relative to
+ // |expect_player_requests_data|.
+ void StartVideoDecoderJob(bool expect_player_requests_data) {
+ Start(CreateVideoDemuxerConfigs(), expect_player_requests_data);
+ }
+
+ // Starts decoding the data. Verifies player behavior relative to
+ // |expect_player_requests_data|.
+ void Start(const DemuxerConfigs& configs, bool expect_player_requests_data) {
+ bool has_audio = configs.audio_codec != kUnknownAudioCodec;
+ bool has_video = configs.video_codec != kUnknownVideoCodec;
+ int original_num_data_requests = demuxer_->num_data_requests();
+ int expected_request_delta = expect_player_requests_data ?
+ ((has_audio ? 1 : 0) + (has_video ? 1 : 0)) : 0;
+
player_.OnDemuxerConfigsAvailable(configs);
player_.Start();
+
+ EXPECT_TRUE(player_.IsPlaying());
+ EXPECT_EQ(original_num_data_requests + expected_request_delta,
+ demuxer_->num_data_requests());
+
+ // Verify player has decoder job iff the config included the media type for
+ // the job and the player is expected to request data due to Start(), above.
+ EXPECT_EQ(expect_player_requests_data && has_audio,
+ GetMediaDecoderJob(true) != NULL);
+ EXPECT_EQ(expect_player_requests_data && has_video,
+ GetMediaDecoderJob(false) != NULL);
+ }
+
+ AccessUnit CreateAccessUnitWithData(bool is_audio, int audio_packet_id) {
+ AccessUnit unit;
+
+ unit.status = DemuxerStream::kOk;
+ scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile(
+ is_audio ? base::StringPrintf("vorbis-packet-%d", audio_packet_id)
+ : "vp8-I-frame-320x240");
+ unit.data = std::vector<uint8>(
+ buffer->data(), buffer->data() + buffer->data_size());
+
+ if (is_audio) {
+ // Vorbis needs 4 extra bytes padding on Android to decode properly. Check
+ // NuMediaExtractor.cpp in Android source code.
+ uint8 padding[4] = { 0xff , 0xff , 0xff , 0xff };
+ unit.data.insert(unit.data.end(), padding, padding + 4);
+ }
+
+ return unit;
}
DemuxerData CreateReadFromDemuxerAckForAudio(int packet_id) {
DemuxerData data;
data.type = DemuxerStream::AUDIO;
data.access_units.resize(1);
- data.access_units[0].status = DemuxerStream::kOk;
- scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile(
- base::StringPrintf("vorbis-packet-%d", packet_id));
- data.access_units[0].data = std::vector<uint8>(
- buffer->data(), buffer->data() + buffer->data_size());
- // Vorbis needs 4 extra bytes padding on Android to decode properly. Check
- // NuMediaExtractor.cpp in Android source code.
- uint8 padding[4] = { 0xff , 0xff , 0xff , 0xff };
- data.access_units[0].data.insert(
- data.access_units[0].data.end(), padding, padding + 4);
+ data.access_units[0] = CreateAccessUnitWithData(true, packet_id);
return data;
}
@@ -185,23 +352,326 @@ class MediaSourcePlayerTest : public testing::Test {
DemuxerData data;
data.type = DemuxerStream::VIDEO;
data.access_units.resize(1);
- data.access_units[0].status = DemuxerStream::kOk;
- scoped_refptr<DecoderBuffer> buffer =
- ReadTestDataFile("vp8-I-frame-320x240");
- data.access_units[0].data = std::vector<uint8>(
- buffer->data(), buffer->data() + buffer->data_size());
+ data.access_units[0] = CreateAccessUnitWithData(false, 0);
return data;
}
DemuxerData CreateEOSAck(bool is_audio) {
- DemuxerData data;
- data.type = is_audio ? DemuxerStream::AUDIO : DemuxerStream::VIDEO;
- data.access_units.resize(1);
- data.access_units[0].status = DemuxerStream::kOk;
- data.access_units[0].end_of_stream = true;
- return data;
+ DemuxerData data;
+ data.type = is_audio ? DemuxerStream::AUDIO : DemuxerStream::VIDEO;
+ data.access_units.resize(1);
+ data.access_units[0].status = DemuxerStream::kOk;
+ data.access_units[0].end_of_stream = true;
+ return data;
+ }
+
+ DemuxerData CreateAbortedAck(bool is_audio) {
+ DemuxerData data;
+ data.type = is_audio ? DemuxerStream::AUDIO : DemuxerStream::VIDEO;
+ data.access_units.resize(1);
+ data.access_units[0].status = DemuxerStream::kAborted;
+ return data;
+ }
+
+ // Helper method for use at test start. It starts an audio decoder job and
+ // immediately feeds it some data to decode. Then, without letting the decoder
+ // job complete a decode cycle, it also starts player SeekTo(). Upon return,
+ // the player should not yet have sent the DemuxerSeek IPC request, though
+ // seek event should be pending. The audio decoder job will also still be
+ // decoding.
+ void StartAudioDecoderJobAndSeekToWhileDecoding(
+ const base::TimeDelta& seek_time) {
+ EXPECT_FALSE(GetMediaDecoderJob(true));
+ EXPECT_FALSE(player_.IsPlaying());
+ EXPECT_EQ(0, demuxer_->num_data_requests());
+ EXPECT_EQ(0.0, GetPrerollTimestamp().InMillisecondsF());
+ EXPECT_EQ(player_.GetCurrentTime(), GetPrerollTimestamp());
+ StartAudioDecoderJob(true);
+ EXPECT_FALSE(GetMediaDecoderJob(true)->is_decoding());
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForAudio(0));
+ EXPECT_TRUE(GetMediaDecoderJob(true)->is_decoding());
+ player_.SeekTo(seek_time);
+ EXPECT_EQ(0.0, GetPrerollTimestamp().InMillisecondsF());
+ EXPECT_EQ(0, demuxer_->num_seek_requests());
+ }
+
+ // Seek, including simulated receipt of |kAborted| read between SeekTo() and
+ // OnDemuxerSeekDone(). Use this helper method only when the player already
+ // has created the decoder job. Exactly one request for more data is expected
+ // following the seek, so use this helper for players with only audio or only
+ // video.
+ void SeekPlayerWithAbort(bool is_audio, const base::TimeDelta& seek_time) {
+ int original_num_seeks = demuxer_->num_seek_requests();
+ int original_num_data_requests = demuxer_->num_data_requests();
+
+ // Initiate a seek. Skip the round-trip of requesting seek from renderer.
+ // Instead behave as if the renderer has asked us to seek.
+ player_.SeekTo(seek_time);
+
+ // Verify that the seek does not occur until previously outstanding data
+ // request is satisfied.
+ EXPECT_EQ(original_num_seeks, demuxer_->num_seek_requests());
+
+ // Simulate seeking causes the demuxer to abort the outstanding read
+ // caused by the seek.
+ player_.OnDemuxerDataAvailable(CreateAbortedAck(is_audio));
+
+ // Verify that the seek is requested.
+ EXPECT_EQ(original_num_seeks + 1, demuxer_->num_seek_requests());
+
+ // Send back the seek done notification. This should trigger the player to
+ // call OnReadFromDemuxer() again.
+ EXPECT_EQ(original_num_data_requests, demuxer_->num_data_requests());
+ player_.OnDemuxerSeekDone(kNoTimestamp());
+ EXPECT_EQ(original_num_data_requests + 1, demuxer_->num_data_requests());
+
+ // No other seek should have been requested.
+ EXPECT_EQ(original_num_seeks + 1, demuxer_->num_seek_requests());
+ }
+
+ DemuxerData CreateReadFromDemuxerAckWithConfigChanged(bool is_audio,
+ int config_unit_index) {
+ DemuxerData data;
+ data.type = is_audio ? DemuxerStream::AUDIO : DemuxerStream::VIDEO;
+ data.access_units.resize(config_unit_index + 1);
+
+ for (int i = 0; i < config_unit_index; ++i)
+ data.access_units[i] = CreateAccessUnitWithData(is_audio, i);
+
+ data.access_units[config_unit_index].status = DemuxerStream::kConfigChanged;
+ return data;
+ }
+
+ // Valid only for video-only player tests. If |trigger_with_release_start| is
+ // true, triggers the browser seek with a Release() + video data received +
+ // Start() with a new surface. If false, triggers the browser seek by
+ // setting a new video surface after beginning decode of received video data.
+ // Such data receipt causes possibility that an I-frame is not next, and
+ // browser seek results once decode completes and surface change processing
+ // begins.
+ void BrowserSeekPlayer(bool trigger_with_release_start) {
+ int expected_num_data_requests = demuxer_->num_data_requests() + 1;
+ int expected_num_seek_requests = demuxer_->num_seek_requests();
+ int expected_num_browser_seek_requests =
+ demuxer_->num_browser_seek_requests();
+
+ EXPECT_FALSE(GetMediaDecoderJob(false));
+ CreateNextTextureAndSetVideoSurface();
+ StartVideoDecoderJob(true);
+
+ if (trigger_with_release_start) {
+ ReleasePlayer();
+
+ // Simulate demuxer's response to the video data request.
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForVideo());
+ EXPECT_FALSE(GetMediaDecoderJob(false));
+ EXPECT_FALSE(player_.IsPlaying());
+ EXPECT_EQ(expected_num_seek_requests, demuxer_->num_seek_requests());
+
+ CreateNextTextureAndSetVideoSurface();
+ StartVideoDecoderJob(false);
+ } else {
+ // Simulate demuxer's response to the video data request.
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForVideo());
+
+ // While the decoder is decoding, trigger a browser seek by changing
+ // surface. Demuxer does not know of browser seek in advance, so no
+ // |kAborted| data is required (though |kAborted| can certainly occur for
+ // any pending read in reality due to renderer preparing for a regular
+ // seek).
+ CreateNextTextureAndSetVideoSurface();
+
+ // Browser seek should not begin until decoding has completed.
+ EXPECT_TRUE(GetMediaDecoderJob(false));
+ EXPECT_EQ(expected_num_seek_requests, demuxer_->num_seek_requests());
+
+ // Wait for the decoder job to finish decoding and be reset pending the
+ // browser seek.
+ while (GetMediaDecoderJob(false))
+ message_loop_.RunUntilIdle();
}
+ // Only one browser seek should have been initiated, and no further data
+ // should have been requested.
+ expected_num_seek_requests++;
+ expected_num_browser_seek_requests++;
+ EXPECT_EQ(expected_num_seek_requests, demuxer_->num_seek_requests());
+ EXPECT_EQ(expected_num_browser_seek_requests,
+ demuxer_->num_browser_seek_requests());
+ EXPECT_EQ(expected_num_data_requests, demuxer_->num_data_requests());
+ }
+
+ // Creates a new decoder job and feeds it data ending with a |kConfigChanged|
+ // access unit. If |config_unit_in_prefetch| is true, sends feeds the config
+ // change AU in response to the job's first read request (prefetch). If
+ // false, regular data is fed and decoded prior to feeding the config change
+ // AU in response to the second data request (after prefetch completed).
+ // |config_unit_index| controls which access unit is |kConfigChanged|.
+ void StartConfigChange(bool is_audio,
+ bool config_unit_in_prefetch,
+ int config_unit_index) {
+ int expected_num_config_requests = demuxer_->num_config_requests();
+
+ EXPECT_FALSE(GetMediaDecoderJob(is_audio));
+ if (is_audio) {
+ StartAudioDecoderJob(true);
+ } else {
+ CreateNextTextureAndSetVideoSurface();
+ StartVideoDecoderJob(true);
+ }
+
+ int expected_num_data_requests = demuxer_->num_data_requests();
+
+ // Feed and decode a standalone access unit so the player exits prefetch.
+ if (!config_unit_in_prefetch) {
+ if (is_audio)
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForAudio(0));
+ else
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForVideo());
+
+ message_loop_.Run();
+
+ // We should have completed the prefetch phase at this point.
+ expected_num_data_requests++;
+ EXPECT_EQ(expected_num_data_requests, demuxer_->num_data_requests());
+ }
+
+ EXPECT_EQ(expected_num_config_requests, demuxer_->num_config_requests());
+
+ // Feed and decode access units with data for any units prior to
+ // |config_unit_index|, and a |kConfigChanged| unit at that index.
+ // Player should prepare to reconfigure the decoder job, and should request
+ // new demuxer configs.
+ player_.OnDemuxerDataAvailable(
+ CreateReadFromDemuxerAckWithConfigChanged(is_audio, config_unit_index));
+ WaitForDecodeDone(is_audio, !is_audio);
+
+ expected_num_config_requests++;
+ EXPECT_EQ(expected_num_data_requests, demuxer_->num_data_requests());
+ EXPECT_EQ(expected_num_config_requests, demuxer_->num_config_requests());
+ }
+
+ void CreateNextTextureAndSetVideoSurface() {
+ gfx::SurfaceTexture* surface_texture;
+ if (surface_texture_a_is_next_) {
+ surface_texture_a_ = new gfx::SurfaceTexture(next_texture_id_++);
+ surface_texture = surface_texture_a_.get();
+ } else {
+ surface_texture_b_ = new gfx::SurfaceTexture(next_texture_id_++);
+ surface_texture = surface_texture_b_.get();
+ }
+
+ surface_texture_a_is_next_ = !surface_texture_a_is_next_;
+ gfx::ScopedJavaSurface surface = gfx::ScopedJavaSurface(surface_texture);
+ player_.SetVideoSurface(surface.Pass());
+ }
+
+ // Wait for one or both of the jobs to complete decoding. Decoder jobs are
+ // assumed to exist for any stream whose decode completion is awaited.
+ void WaitForDecodeDone(bool wait_for_audio, bool wait_for_video) {
+ DCHECK(wait_for_audio || wait_for_video);
+
+ while ((wait_for_audio && GetMediaDecoderJob(true) &&
+ GetMediaDecoderJob(true)->is_decoding()) ||
+ (wait_for_video && GetMediaDecoderJob(false) &&
+ GetMediaDecoderJob(false)->is_decoding())) {
+ message_loop_.RunUntilIdle();
+ }
+ }
+
+ void WaitForAudioDecodeDone() {
+ WaitForDecodeDone(true, false);
+ }
+
+ void WaitForVideoDecodeDone() {
+ WaitForDecodeDone(false, true);
+ }
+
+ void WaitForAudioVideoDecodeDone() {
+ WaitForDecodeDone(true, true);
+ }
+
+ // If |send_eos| is true, generates EOS for the stream corresponding to
+ // |eos_for_audio|. Verifies that playback completes and no further data
+ // is requested.
+ // If |send_eos| is false, then it is assumed that caller previously arranged
+ // for player to receive EOS for each stream, but the player has not yet
+ // decoded all of them. In this case, |eos_for_audio| is ignored.
+ void VerifyPlaybackCompletesOnEOSDecode(bool send_eos, bool eos_for_audio) {
+ int original_num_data_requests = demuxer_->num_data_requests();
+ if (send_eos)
+ player_.OnDemuxerDataAvailable(CreateEOSAck(eos_for_audio));
+ EXPECT_FALSE(manager_.playback_completed());
+ message_loop_.Run();
+ EXPECT_TRUE(manager_.playback_completed());
+ EXPECT_EQ(original_num_data_requests, demuxer_->num_data_requests());
+ }
+
+ void VerifyCompletedPlaybackResumesOnSeekPlusStart(bool have_audio,
+ bool have_video) {
+ DCHECK(have_audio || have_video);
+
+ EXPECT_TRUE(manager_.playback_completed());
+
+ player_.SeekTo(base::TimeDelta());
+ player_.OnDemuxerSeekDone(kNoTimestamp());
+ Start(CreateDemuxerConfigs(have_audio, have_video), true);
+ }
+
+ // Starts the appropriate decoder jobs according to |have_audio| and
+ // |have_video|. Then starts seek during decode of EOS or non-EOS according to
+ // |eos_audio| and |eos_video|. Simulates seek completion and verifies that
+ // playback never completed. |eos_{audio,video}| is ignored if the
+ // corresponding |have_{audio,video}| is false.
+ void VerifySeekDuringEOSDecodePreventsPlaybackCompletion(bool have_audio,
+ bool have_video,
+ bool eos_audio,
+ bool eos_video) {
+ DCHECK(have_audio || have_video);
+
+ if (have_video)
+ CreateNextTextureAndSetVideoSurface();
+
+ Start(CreateDemuxerConfigs(have_audio, have_video), true);
+
+ if (have_audio)
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForAudio(0));
+
+ if (have_video)
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForVideo());
+
+ // Run until more data is requested a number of times equal to the number of
+ // media types configured. Since prefetching may be in progress, we cannot
+ // reliably expect Run() to complete until we have sent demuxer data for all
+ // configured media types, above.
+ for (int i = 0; i < (have_audio ? 1 : 0) + (have_video ? 1 : 0); i++)
+ message_loop_.Run();
+
+ // Simulate seek while decoding EOS or non-EOS for the appropriate
+ // stream(s).
+ if (have_audio) {
+ if (eos_audio)
+ player_.OnDemuxerDataAvailable(CreateEOSAck(true));
+ else
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForAudio(1));
+ }
+
+ if (have_video) {
+ if (eos_video)
+ player_.OnDemuxerDataAvailable(CreateEOSAck(false));
+ else
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForVideo());
+ }
+
+ player_.SeekTo(base::TimeDelta());
+ EXPECT_EQ(0, demuxer_->num_seek_requests());
+ WaitForDecodeDone(have_audio, have_video);
+ EXPECT_EQ(1, demuxer_->num_seek_requests());
+
+ player_.OnDemuxerSeekDone(kNoTimestamp());
+ EXPECT_FALSE(manager_.playback_completed());
+ }
+
base::TimeTicks StartTimeTicks() {
return player_.start_time_ticks_;
}
@@ -214,220 +684,236 @@ class MediaSourcePlayerTest : public testing::Test {
scheme_uuid, security_level, container, codecs);
}
- void CreateAndSetVideoSurface() {
- surface_texture_ = new gfx::SurfaceTexture(0);
- surface_ = gfx::ScopedJavaSurface(surface_texture_.get());
- player_.SetVideoSurface(surface_.Pass());
- }
-
- protected:
base::MessageLoop message_loop_;
MockMediaPlayerManager manager_;
- MockDemuxerAndroid demuxer_;
+ MockDemuxerAndroid* demuxer_; // Owned by |player_|.
MediaSourcePlayer player_;
- scoped_refptr<gfx::SurfaceTexture> surface_texture_;
- gfx::ScopedJavaSurface surface_;
+
+ // Track whether a possibly asynch decoder callback test hook has run.
+ bool decoder_callback_hook_executed_;
+
+ // We need to keep the surface texture while the decoder is actively decoding.
+ // Otherwise, it may trigger unexpected crashes on some devices. To switch
+ // surfaces, tests need to create a new surface texture without releasing
+ // their previous one. In CreateNextTextureAndSetVideoSurface(), we toggle
+ // between two surface textures, only replacing the N-2 texture. Assumption is
+ // that no more than N-1 texture is in use by decoder when
+ // CreateNextTextureAndSetVideoSurface() is called.
+ scoped_refptr<gfx::SurfaceTexture> surface_texture_a_;
+ scoped_refptr<gfx::SurfaceTexture> surface_texture_b_;
+ bool surface_texture_a_is_next_;
+ int next_texture_id_;
DISALLOW_COPY_AND_ASSIGN(MediaSourcePlayerTest);
};
TEST_F(MediaSourcePlayerTest, StartAudioDecoderWithValidConfig) {
- if (!MediaCodecBridge::IsAvailable())
- return;
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
// Test audio decoder job will be created when codec is successfully started.
- StartAudioDecoderJob();
- EXPECT_TRUE(NULL != GetMediaDecoderJob(true));
- EXPECT_EQ(1, demuxer_.num_requests());
+ StartAudioDecoderJob(true);
+ EXPECT_EQ(0, demuxer_->num_seek_requests());
}
TEST_F(MediaSourcePlayerTest, StartAudioDecoderWithInvalidConfig) {
- if (!MediaCodecBridge::IsAvailable())
- return;
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
// Test audio decoder job will not be created when failed to start the codec.
- DemuxerConfigs configs;
- configs.audio_codec = kCodecVorbis;
- configs.audio_channels = 2;
- configs.audio_sampling_rate = 44100;
- configs.is_audio_encrypted = false;
- configs.duration_ms = kDefaultDurationInMs;
+ DemuxerConfigs configs = CreateAudioDemuxerConfigs(kCodecVorbis);
+ // Replace with invalid |audio_extra_data|
+ configs.audio_extra_data.clear();
uint8 invalid_codec_data[] = { 0x00, 0xff, 0xff, 0xff, 0xff };
configs.audio_extra_data.insert(configs.audio_extra_data.begin(),
invalid_codec_data, invalid_codec_data + 4);
- Start(configs);
- EXPECT_EQ(NULL, GetMediaDecoderJob(true));
- EXPECT_EQ(0, demuxer_.num_requests());
+ Start(configs, false);
+ EXPECT_EQ(0, demuxer_->num_seek_requests());
}
TEST_F(MediaSourcePlayerTest, StartVideoCodecWithValidSurface) {
- if (!MediaCodecBridge::IsAvailable())
- return;
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
// Test video decoder job will be created when surface is valid.
- StartVideoDecoderJob();
// Video decoder job will not be created until surface is available.
- EXPECT_EQ(NULL, GetMediaDecoderJob(false));
- EXPECT_EQ(0, demuxer_.num_requests());
-
- CreateAndSetVideoSurface();
- EXPECT_EQ(1u, demuxer_.last_seek_request_id());
- player_.OnDemuxerSeeked(demuxer_.last_seek_request_id());
- // The decoder job should be ready now.
- EXPECT_TRUE(NULL != GetMediaDecoderJob(false));
- EXPECT_EQ(1, demuxer_.num_requests());
+ StartVideoDecoderJob(false);
+
+ // Set both an initial and a later video surface without receiving any
+ // demuxed data yet.
+ CreateNextTextureAndSetVideoSurface();
+ MediaDecoderJob* first_job = GetMediaDecoderJob(false);
+ EXPECT_TRUE(first_job);
+ CreateNextTextureAndSetVideoSurface();
+
+ // Setting another surface will not create a new job until any pending
+ // read is satisfied (and job is no longer decoding).
+ EXPECT_EQ(first_job, GetMediaDecoderJob(false));
+
+ // No seeks, even on setting surface, should have occurred. (Browser seeks can
+ // occur on setting surface, but only after previously receiving video data.)
+ EXPECT_EQ(0, demuxer_->num_seek_requests());
+
+ // Note, the decoder job for the second surface set, above, will be created
+ // only after the pending read is satisfied and decoded, and the resulting
+ // browser seek is done. See BrowserSeek_* tests for this coverage.
}
TEST_F(MediaSourcePlayerTest, StartVideoCodecWithInvalidSurface) {
- if (!MediaCodecBridge::IsAvailable())
- return;
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
- // Test video decoder job will be created when surface is valid.
+ // Test video decoder job will not be created when surface is invalid.
scoped_refptr<gfx::SurfaceTexture> surface_texture(
new gfx::SurfaceTexture(0));
gfx::ScopedJavaSurface surface(surface_texture.get());
- StartVideoDecoderJob();
- // Video decoder job will not be created until surface is available.
- EXPECT_EQ(NULL, GetMediaDecoderJob(false));
- EXPECT_EQ(0, demuxer_.num_requests());
+ StartVideoDecoderJob(false);
// Release the surface texture.
surface_texture = NULL;
player_.SetVideoSurface(surface.Pass());
- EXPECT_EQ(1u, demuxer_.last_seek_request_id());
- player_.OnDemuxerSeeked(demuxer_.last_seek_request_id());
- EXPECT_EQ(NULL, GetMediaDecoderJob(false));
- EXPECT_EQ(0, demuxer_.num_requests());
+
+ // Player should not seek the demuxer on setting initial surface.
+ EXPECT_EQ(0, demuxer_->num_seek_requests());
+
+ EXPECT_FALSE(GetMediaDecoderJob(false));
+ EXPECT_EQ(0, demuxer_->num_data_requests());
}
TEST_F(MediaSourcePlayerTest, ReadFromDemuxerAfterSeek) {
- if (!MediaCodecBridge::IsAvailable())
- return;
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
// Test decoder job will resend a ReadFromDemuxer request after seek.
- StartAudioDecoderJob();
- EXPECT_TRUE(NULL != GetMediaDecoderJob(true));
- EXPECT_EQ(1, demuxer_.num_requests());
-
- // Initiate a seek
- player_.SeekTo(base::TimeDelta());
-
- // Verify that the seek does not occur until the initial prefetch
- // completes.
- EXPECT_EQ(0u, demuxer_.last_seek_request_id());
-
- // Simulate aborted read caused by the seek. This aborts the initial
- // prefetch.
- DemuxerData data;
- data.type = DemuxerStream::AUDIO;
- data.access_units.resize(1);
- data.access_units[0].status = DemuxerStream::kAborted;
- player_.OnDemuxerDataAvailable(data);
-
- // Verify that the seek is requested now that the initial prefetch
- // has completed.
- EXPECT_EQ(1u, demuxer_.last_seek_request_id());
-
- // Sending back the seek ACK, this should trigger the player to call
- // OnReadFromDemuxer() again.
- player_.OnDemuxerSeeked(demuxer_.last_seek_request_id());
- EXPECT_EQ(2, demuxer_.num_requests());
+ StartAudioDecoderJob(true);
+ SeekPlayerWithAbort(true, base::TimeDelta());
}
TEST_F(MediaSourcePlayerTest, SetSurfaceWhileSeeking) {
- if (!MediaCodecBridge::IsAvailable())
- return;
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
// Test SetVideoSurface() will not cause an extra seek while the player is
- // waiting for a seek ACK.
- StartVideoDecoderJob();
+ // waiting for demuxer to indicate seek is done.
// Player is still waiting for SetVideoSurface(), so no request is sent.
- EXPECT_EQ(0, demuxer_.num_requests());
+ StartVideoDecoderJob(false); // Verifies no data requested.
+
+ // Initiate a seek. Skip requesting element seek of renderer.
+ // Instead behave as if the renderer has asked us to seek.
+ EXPECT_EQ(0, demuxer_->num_seek_requests());
player_.SeekTo(base::TimeDelta());
- EXPECT_EQ(1u, demuxer_.last_seek_request_id());
+ EXPECT_EQ(1, demuxer_->num_seek_requests());
+
+ CreateNextTextureAndSetVideoSurface();
+ EXPECT_FALSE(GetMediaDecoderJob(false));
+ EXPECT_EQ(1, demuxer_->num_seek_requests());
- CreateAndSetVideoSurface();
- EXPECT_TRUE(NULL == GetMediaDecoderJob(false));
- EXPECT_EQ(1u, demuxer_.last_seek_request_id());
+ // Reconfirm player has not yet requested data.
+ EXPECT_EQ(0, demuxer_->num_data_requests());
- // Send the seek ack, player should start requesting data afterwards.
- player_.OnDemuxerSeeked(demuxer_.last_seek_request_id());
- EXPECT_TRUE(NULL != GetMediaDecoderJob(false));
- EXPECT_EQ(1, demuxer_.num_requests());
+ // Send the seek done notification. The player should start requesting data.
+ player_.OnDemuxerSeekDone(kNoTimestamp());
+ EXPECT_TRUE(GetMediaDecoderJob(false));
+ EXPECT_EQ(1, demuxer_->num_data_requests());
+
+ // Reconfirm exactly 1 seek request has been made of demuxer, and that it
+ // was not a browser seek request.
+ EXPECT_EQ(1, demuxer_->num_seek_requests());
+ EXPECT_EQ(0, demuxer_->num_browser_seek_requests());
}
TEST_F(MediaSourcePlayerTest, ChangeMultipleSurfaceWhileDecoding) {
- if (!MediaCodecBridge::IsAvailable()) {
- LOG(INFO) << "Could not run test - not supported on device.";
- return;
- }
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
// Test MediaSourcePlayer can switch multiple surfaces during decoding.
- CreateAndSetVideoSurface();
- StartVideoDecoderJob();
- EXPECT_EQ(1u, demuxer_.last_seek_request_id());
- EXPECT_EQ(0, demuxer_.num_requests());
+ CreateNextTextureAndSetVideoSurface();
+ StartVideoDecoderJob(true);
+ EXPECT_EQ(0, demuxer_->num_seek_requests());
// Send the first input chunk.
- player_.OnDemuxerSeeked(demuxer_.last_seek_request_id());
- EXPECT_EQ(1, demuxer_.num_requests());
player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForVideo());
// While the decoder is decoding, change multiple surfaces. Pass an empty
// surface first.
gfx::ScopedJavaSurface empty_surface;
player_.SetVideoSurface(empty_surface.Pass());
- // Pass a new non-empty surface.
- CreateAndSetVideoSurface();
+ // Next, pass a new non-empty surface.
+ CreateNextTextureAndSetVideoSurface();
- // Wait for the decoder job to finish decoding.
- while(GetMediaDecoderJob(false)->is_decoding())
+ // Wait for the decoder job to finish decoding and be reset pending a browser
+ // seek.
+ while (GetMediaDecoderJob(false))
message_loop_.RunUntilIdle();
- // A seek should be initiated to request Iframe.
- EXPECT_EQ(2u, demuxer_.last_seek_request_id());
- EXPECT_EQ(1, demuxer_.num_requests());
+
+ // Only one browser seek should have been initiated. No further data request
+ // should have been processed on |message_loop_| before surface change event
+ // became pending, above.
+ EXPECT_EQ(1, demuxer_->num_browser_seek_requests());
+ EXPECT_EQ(1, demuxer_->num_data_requests());
+
+ // Simulate browser seek is done and confirm player requests more data for new
+ // video decoder job.
+ player_.OnDemuxerSeekDone(player_.GetCurrentTime());
+ EXPECT_TRUE(GetMediaDecoderJob(false));
+ EXPECT_EQ(2, demuxer_->num_data_requests());
+ EXPECT_EQ(1, demuxer_->num_seek_requests());
}
-TEST_F(MediaSourcePlayerTest, StartAfterSeekFinish) {
- if (!MediaCodecBridge::IsAvailable())
- return;
+TEST_F(MediaSourcePlayerTest, AudioOnlyStartAfterSeekFinish) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
- // Test decoder job will not start until all pending seek event is handled.
- DemuxerConfigs configs;
- configs.audio_codec = kCodecVorbis;
- configs.audio_channels = 2;
- configs.audio_sampling_rate = 44100;
- configs.is_audio_encrypted = false;
- configs.duration_ms = kDefaultDurationInMs;
+ // Test audio decoder job will not start until pending seek event is handled.
+ DemuxerConfigs configs = CreateAudioDemuxerConfigs(kCodecVorbis);
player_.OnDemuxerConfigsAvailable(configs);
- EXPECT_EQ(NULL, GetMediaDecoderJob(true));
- EXPECT_EQ(0, demuxer_.num_requests());
+ EXPECT_FALSE(GetMediaDecoderJob(true));
- // Initiate a seek
+ // Initiate a seek. Skip requesting element seek of renderer.
+ // Instead behave as if the renderer has asked us to seek.
player_.SeekTo(base::TimeDelta());
- EXPECT_EQ(1u, demuxer_.last_seek_request_id());
+ EXPECT_EQ(1, demuxer_->num_seek_requests());
player_.Start();
- EXPECT_EQ(NULL, GetMediaDecoderJob(true));
- EXPECT_EQ(0, demuxer_.num_requests());
+ EXPECT_FALSE(GetMediaDecoderJob(true));
+ EXPECT_EQ(0, demuxer_->num_data_requests());
+
+ // Sending back the seek done notification.
+ player_.OnDemuxerSeekDone(kNoTimestamp());
+ EXPECT_TRUE(GetMediaDecoderJob(true));
+ EXPECT_EQ(1, demuxer_->num_data_requests());
- // Sending back the seek ACK.
- player_.OnDemuxerSeeked(demuxer_.last_seek_request_id());
- EXPECT_TRUE(NULL != GetMediaDecoderJob(true));
- EXPECT_EQ(1, demuxer_.num_requests());
+ // Reconfirm exactly 1 seek request has been made of demuxer.
+ EXPECT_EQ(1, demuxer_->num_seek_requests());
+}
+
+TEST_F(MediaSourcePlayerTest, VideoOnlyStartAfterSeekFinish) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // Test video decoder job will not start until pending seek event is handled.
+ CreateNextTextureAndSetVideoSurface();
+ DemuxerConfigs configs = CreateVideoDemuxerConfigs();
+ player_.OnDemuxerConfigsAvailable(configs);
+ EXPECT_FALSE(GetMediaDecoderJob(false));
+
+ // Initiate a seek. Skip requesting element seek of renderer.
+ // Instead behave as if the renderer has asked us to seek.
+ player_.SeekTo(base::TimeDelta());
+ EXPECT_EQ(1, demuxer_->num_seek_requests());
+
+ player_.Start();
+ EXPECT_FALSE(GetMediaDecoderJob(false));
+ EXPECT_EQ(0, demuxer_->num_data_requests());
+
+ // Sending back the seek done notification.
+ player_.OnDemuxerSeekDone(kNoTimestamp());
+ EXPECT_TRUE(GetMediaDecoderJob(false));
+ EXPECT_EQ(1, demuxer_->num_data_requests());
+
+ // Reconfirm exactly 1 seek request has been made of demuxer.
+ EXPECT_EQ(1, demuxer_->num_seek_requests());
}
TEST_F(MediaSourcePlayerTest, StartImmediatelyAfterPause) {
- if (!MediaCodecBridge::IsAvailable())
- return;
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
// Test that if the decoding job is not fully stopped after Pause(),
// calling Start() will be a noop.
- StartAudioDecoderJob();
+ StartAudioDecoderJob(true);
MediaDecoderJob* decoder_job = GetMediaDecoderJob(true);
- EXPECT_TRUE(NULL != decoder_job);
- EXPECT_EQ(1, demuxer_.num_requests());
EXPECT_FALSE(GetMediaDecoderJob(true)->is_decoding());
// Sending data to player.
@@ -443,64 +929,46 @@ TEST_F(MediaSourcePlayerTest, StartImmediatelyAfterPause) {
player_.Start();
// Verify that Start() will not destroy and recreate the decoder job.
EXPECT_EQ(decoder_job, GetMediaDecoderJob(true));
- EXPECT_EQ(1, demuxer_.num_requests());
+ EXPECT_EQ(1, demuxer_->num_data_requests());
EXPECT_TRUE(GetMediaDecoderJob(true)->is_decoding());
message_loop_.Run();
// The decoder job should finish and a new request will be sent.
- EXPECT_EQ(2, demuxer_.num_requests());
+ EXPECT_EQ(2, demuxer_->num_data_requests());
EXPECT_FALSE(GetMediaDecoderJob(true)->is_decoding());
}
TEST_F(MediaSourcePlayerTest, DecoderJobsCannotStartWithoutAudio) {
- if (!MediaCodecBridge::IsAvailable())
- return;
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
// Test that when Start() is called, video decoder jobs will wait for audio
// decoder job before start decoding the data.
- DemuxerConfigs configs;
- configs.audio_codec = kCodecVorbis;
- configs.audio_channels = 2;
- configs.audio_sampling_rate = 44100;
- configs.is_audio_encrypted = false;
- scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile("vorbis-extradata");
- configs.audio_extra_data = std::vector<uint8>(
- buffer->data(),
- buffer->data() + buffer->data_size());
- configs.video_codec = kCodecVP8;
- configs.video_size = gfx::Size(320, 240);
- configs.is_video_encrypted = false;
- configs.duration_ms = kDefaultDurationInMs;
- Start(configs);
- EXPECT_EQ(0, demuxer_.num_requests());
-
- CreateAndSetVideoSurface();
- EXPECT_EQ(1u, demuxer_.last_seek_request_id());
- player_.OnDemuxerSeeked(demuxer_.last_seek_request_id());
-
+ CreateNextTextureAndSetVideoSurface();
+ Start(CreateAudioVideoDemuxerConfigs(), true);
MediaDecoderJob* audio_decoder_job = GetMediaDecoderJob(true);
MediaDecoderJob* video_decoder_job = GetMediaDecoderJob(false);
- EXPECT_EQ(2, demuxer_.num_requests());
+
EXPECT_FALSE(audio_decoder_job->is_decoding());
EXPECT_FALSE(video_decoder_job->is_decoding());
- // Sending audio data to player, audio decoder should not start.
+ // Sending video data to player, video decoder should not start.
player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForVideo());
EXPECT_FALSE(video_decoder_job->is_decoding());
- // Sending video data to player, both decoders should start now.
+ // Sending audio data to player, both decoders should start now.
player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForAudio(0));
EXPECT_TRUE(audio_decoder_job->is_decoding());
EXPECT_TRUE(video_decoder_job->is_decoding());
+
+ // No seeks should have occurred.
+ EXPECT_EQ(0, demuxer_->num_seek_requests());
}
TEST_F(MediaSourcePlayerTest, StartTimeTicksResetAfterDecoderUnderruns) {
- if (!MediaCodecBridge::IsAvailable())
- return;
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
// Test start time ticks will reset after decoder job underruns.
- StartAudioDecoderJob();
- EXPECT_TRUE(NULL != GetMediaDecoderJob(true));
- EXPECT_EQ(1, demuxer_.num_requests());
+ StartAudioDecoderJob(true);
+
// For the first couple chunks, the decoder job may return
// DECODE_FORMAT_CHANGED status instead of DECODE_SUCCEEDED status. Decode
// more frames to guarantee that DECODE_SUCCEEDED will be returned.
@@ -511,7 +979,7 @@ TEST_F(MediaSourcePlayerTest, StartTimeTicksResetAfterDecoderUnderruns) {
}
// The decoder job should finish and a new request will be sent.
- EXPECT_EQ(5, demuxer_.num_requests());
+ EXPECT_EQ(5, demuxer_->num_data_requests());
EXPECT_TRUE(GetMediaDecoderJob(true)->is_decoding());
base::TimeTicks previous = StartTimeTicks();
@@ -525,8 +993,7 @@ TEST_F(MediaSourcePlayerTest, StartTimeTicksResetAfterDecoderUnderruns) {
// Send new data to the decoder so it can finish the currently
// pending decode.
player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForAudio(3));
- while(GetMediaDecoderJob(true)->is_decoding())
- message_loop_.RunUntilIdle();
+ WaitForAudioDecodeDone();
// Verify the start time ticks is cleared at this point because the
// player is prefetching.
@@ -541,107 +1008,1046 @@ TEST_F(MediaSourcePlayerTest, StartTimeTicksResetAfterDecoderUnderruns) {
EXPECT_LE(100.0, (current - previous).InMillisecondsF());
}
-TEST_F(MediaSourcePlayerTest, NoRequestForDataAfterInputEOS) {
- if (!MediaCodecBridge::IsAvailable())
- return;
+TEST_F(MediaSourcePlayerTest, V_SecondAccessUnitIsEOSAndResumePlayAfterSeek) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // Test MediaSourcePlayer can replay video after input EOS is reached.
+ CreateNextTextureAndSetVideoSurface();
+ StartVideoDecoderJob(true);
- // Test MediaSourcePlayer will not request for new data after input EOS is
- // reached.
- CreateAndSetVideoSurface();
- StartVideoDecoderJob();
- player_.OnDemuxerSeeked(demuxer_.last_seek_request_id());
- EXPECT_EQ(1, demuxer_.num_requests());
// Send the first input chunk.
player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForVideo());
message_loop_.Run();
- EXPECT_EQ(2, demuxer_.num_requests());
- // Send EOS.
- player_.OnDemuxerDataAvailable(CreateEOSAck(false));
- message_loop_.Run();
- // No more request for data should be made.
- EXPECT_EQ(2, demuxer_.num_requests());
+ VerifyPlaybackCompletesOnEOSDecode(true, false);
+ VerifyCompletedPlaybackResumesOnSeekPlusStart(false, true);
}
-TEST_F(MediaSourcePlayerTest, ReplayAfterInputEOS) {
- if (!MediaCodecBridge::IsAvailable())
- return;
+TEST_F(MediaSourcePlayerTest, A_FirstAccessUnitIsEOSAndResumePlayAfterSeek) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
- // Test MediaSourcePlayer can replay after input EOS is
- // reached.
- CreateAndSetVideoSurface();
- StartVideoDecoderJob();
- player_.OnDemuxerSeeked(demuxer_.last_seek_request_id());
- EXPECT_EQ(1, demuxer_.num_requests());
- // Send the first input chunk.
+ // Test decode of audio EOS buffer without any prior decode. See also
+ // http://b/11696552.
+ // Also tests that seeking+Start() after completing audio playback resumes
+ // playback.
+ Start(CreateAudioDemuxerConfigs(kCodecAAC), true);
+ VerifyPlaybackCompletesOnEOSDecode(true, true);
+ VerifyCompletedPlaybackResumesOnSeekPlusStart(true, false);
+}
+
+TEST_F(MediaSourcePlayerTest, V_FirstAccessUnitAfterSeekIsEOS) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // Test decode of video EOS buffer, just after seeking, without any prior
+ // decode (other than the simulated |kAborted| resulting from the seek
+ // process.)
+ CreateNextTextureAndSetVideoSurface();
+ StartVideoDecoderJob(true);
+ SeekPlayerWithAbort(false, base::TimeDelta());
+ VerifyPlaybackCompletesOnEOSDecode(true, false);
+}
+
+TEST_F(MediaSourcePlayerTest, A_FirstAccessUnitAfterSeekIsEOS) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // Test decode of audio EOS buffer, just after seeking, without any prior
+ // decode (other than the simulated |kAborted| resulting from the seek
+ // process.) See also http://b/11696552.
+ Start(CreateAudioDemuxerConfigs(kCodecAAC), true);
+ SeekPlayerWithAbort(true, base::TimeDelta());
+ VerifyPlaybackCompletesOnEOSDecode(true, true);
+}
+
+TEST_F(MediaSourcePlayerTest, AV_PlaybackCompletionAcrossConfigChange) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // Test that if one stream (audio) has completed decode of EOS and the other
+ // stream (video) processes config change, that subsequent video EOS completes
+ // A/V playback.
+ // Also tests that seeking+Start() after completing playback resumes playback.
+ CreateNextTextureAndSetVideoSurface();
+ Start(CreateAudioVideoDemuxerConfigs(), true);
+
+ player_.OnDemuxerDataAvailable(CreateEOSAck(true)); // Audio EOS
+ EXPECT_EQ(0, demuxer_->num_config_requests());
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckWithConfigChanged(
+ false, 0)); // Video |kConfigChanged| as first unit.
+
+ WaitForAudioVideoDecodeDone();
+
+ EXPECT_EQ(1, demuxer_->num_config_requests());
+ EXPECT_EQ(2, demuxer_->num_data_requests());
+ player_.OnDemuxerConfigsAvailable(CreateAudioVideoDemuxerConfigs());
+ EXPECT_EQ(3, demuxer_->num_data_requests());
+
+ // At no time after completing audio EOS decode, above, should the
+ // audio decoder job resume decoding. Send and decode video EOS.
+ VerifyPlaybackCompletesOnEOSDecode(true, false);
+ VerifyCompletedPlaybackResumesOnSeekPlusStart(true, true);
+}
+
+TEST_F(MediaSourcePlayerTest, VA_PlaybackCompletionAcrossConfigChange) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // Test that if one stream (video) has completed decode of EOS and the other
+ // stream (audio) processes config change, that subsequent audio EOS completes
+ // A/V playback.
+ // Also tests that seeking+Start() after completing playback resumes playback.
+ CreateNextTextureAndSetVideoSurface();
+ Start(CreateAudioVideoDemuxerConfigs(), true);
+
+ player_.OnDemuxerDataAvailable(CreateEOSAck(false)); // Video EOS
+ EXPECT_EQ(0, demuxer_->num_config_requests());
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckWithConfigChanged(
+ true, 0)); // Audio |kConfigChanged| as first unit.
+
+ WaitForAudioVideoDecodeDone();
+
+ // TODO(wolenetz/qinmin): Prevent redundant demuxer config request and change
+ // expectation to 1 here. See http://crbug.com/325528.
+ EXPECT_EQ(2, demuxer_->num_config_requests());
+ EXPECT_EQ(2, demuxer_->num_data_requests());
+ player_.OnDemuxerConfigsAvailable(CreateAudioVideoDemuxerConfigs());
+ EXPECT_EQ(3, demuxer_->num_data_requests());
+
+ // At no time after completing video EOS decode, above, should the
+ // video decoder job resume decoding. Send and decode audio EOS.
+ VerifyPlaybackCompletesOnEOSDecode(true, true);
+ VerifyCompletedPlaybackResumesOnSeekPlusStart(true, true);
+}
+
+TEST_F(MediaSourcePlayerTest, AV_NoPrefetchForFinishedVideoOnAudioStarvation) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // Test that if one stream (video) has completed decode of EOS, prefetch
+ // resulting from player starvation occurs only for the other stream (audio),
+ // and responding to that prefetch with EOS completes A/V playback, even if
+ // another starvation occurs during the latter EOS's decode.
+ CreateNextTextureAndSetVideoSurface();
+ Start(CreateAudioVideoDemuxerConfigs(), true);
+
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForAudio(0));
+ player_.OnDemuxerDataAvailable(CreateEOSAck(false)); // Video EOS
+
+ // Wait until video EOS is processed and more data (assumed to be audio) is
+ // requested.
+ while (demuxer_->num_data_requests() < 3)
+ message_loop_.RunUntilIdle();
+ WaitForVideoDecodeDone();
+ EXPECT_EQ(3, demuxer_->num_data_requests());
+
+ // Simulate decoder underrun to trigger prefetch while still decoding audio.
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForAudio(1));
+ EXPECT_TRUE(GetMediaDecoderJob(true)->is_decoding() &&
+ !GetMediaDecoderJob(false)->is_decoding());
+ TriggerPlayerStarvation();
+
+ // Complete the audio decode that was in progress when simulated player
+ // starvation was triggered.
+ WaitForAudioDecodeDone();
+ EXPECT_EQ(4, demuxer_->num_data_requests());
+
+ player_.OnDemuxerDataAvailable(CreateEOSAck(true)); // Audio EOS
+ EXPECT_FALSE(GetMediaDecoderJob(false)->is_decoding());
+ EXPECT_TRUE(GetMediaDecoderJob(true)->is_decoding());
+
+ // Simulate another decoder underrun to trigger prefetch while decoding EOS.
+ TriggerPlayerStarvation();
+ VerifyPlaybackCompletesOnEOSDecode(false, true /* ignored */);
+}
+
+TEST_F(MediaSourcePlayerTest, V_StarvationDuringEOSDecode) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // Test that video-only playback completes without further data requested when
+ // starvation occurs during EOS decode.
+ CreateNextTextureAndSetVideoSurface();
+ StartVideoDecoderJob(true);
player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForVideo());
message_loop_.Run();
- EXPECT_EQ(2, demuxer_.num_requests());
- // Send EOS.
- player_.OnDemuxerDataAvailable(CreateEOSAck(false));
+ // Simulate decoder underrun to trigger prefetch while decoding EOS.
+ player_.OnDemuxerDataAvailable(CreateEOSAck(false)); // Video EOS
+ EXPECT_TRUE(GetMediaDecoderJob(false)->is_decoding());
+ TriggerPlayerStarvation();
+ VerifyPlaybackCompletesOnEOSDecode(false, false /* ignored */);
+}
+
+TEST_F(MediaSourcePlayerTest, A_StarvationDuringEOSDecode) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // Test that audio-only playback completes without further data requested when
+ // starvation occurs during EOS decode.
+ StartAudioDecoderJob(true);
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForAudio(0));
message_loop_.Run();
- // No more request for data should be made.
- EXPECT_EQ(2, demuxer_.num_requests());
- player_.SeekTo(base::TimeDelta());
- StartVideoDecoderJob();
- player_.OnDemuxerSeeked(demuxer_.last_seek_request_id());
- // Seek/Play after EOS should request more data.
- EXPECT_EQ(3, demuxer_.num_requests());
+ // Simulate decoder underrun to trigger prefetch while decoding EOS.
+ player_.OnDemuxerDataAvailable(CreateEOSAck(true)); // Audio EOS
+ EXPECT_TRUE(GetMediaDecoderJob(true)->is_decoding());
+ TriggerPlayerStarvation();
+ VerifyPlaybackCompletesOnEOSDecode(false, true /* ignored */);
+}
+
+TEST_F(MediaSourcePlayerTest, AV_SeekDuringEOSDecodePreventsCompletion) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // Test that seek supercedes audio+video playback completion on simultaneous
+ // audio and video EOS decode, if SeekTo() occurs during these EOS decodes.
+ VerifySeekDuringEOSDecodePreventsPlaybackCompletion(true, true, true, true);
+}
+
+TEST_F(MediaSourcePlayerTest, AV_SeekDuringAudioEOSDecodePreventsCompletion) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // Test that seek supercedes audio+video playback completion on simultaneous
+ // audio EOS and video non-EOS decode, if SeekTo() occurs during these
+ // decodes.
+ VerifySeekDuringEOSDecodePreventsPlaybackCompletion(true, true, true, false);
+}
+
+TEST_F(MediaSourcePlayerTest, AV_SeekDuringVideoEOSDecodePreventsCompletion) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // Test that seek supercedes audio+video playback completion on simultaneous
+ // audio non-EOS and video EOS decode, if SeekTo() occurs during these
+ // decodes.
+ VerifySeekDuringEOSDecodePreventsPlaybackCompletion(true, true, false, true);
+}
+
+TEST_F(MediaSourcePlayerTest, V_SeekDuringEOSDecodePreventsCompletion) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // Test that seek supercedes video-only playback completion on EOS decode, if
+ // SeekTo() occurs during EOS decode.
+ VerifySeekDuringEOSDecodePreventsPlaybackCompletion(false, true, false, true);
+}
+
+TEST_F(MediaSourcePlayerTest, A_SeekDuringEOSDecodePreventsCompletion) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // Test that seek supercedes audio-only playback completion on EOS decode, if
+ // SeekTo() occurs during EOS decode.
+ VerifySeekDuringEOSDecodePreventsPlaybackCompletion(true, false, true, false);
}
TEST_F(MediaSourcePlayerTest, NoRequestForDataAfterAbort) {
- if (!MediaCodecBridge::IsAvailable()) {
- LOG(INFO) << "Could not run test - not supported on device.";
- return;
- }
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
- // Test that the decoder will request new data after receiving an aborted
+ // Test that the decoder will not request new data after receiving an aborted
// access unit.
- StartAudioDecoderJob();
- EXPECT_EQ(1, demuxer_.num_requests());
+ StartAudioDecoderJob(true);
// Send an aborted access unit.
- DemuxerData data;
- data.type = DemuxerStream::AUDIO;
- data.access_units.resize(1);
- data.access_units[0].status = DemuxerStream::kAborted;
- player_.OnDemuxerDataAvailable(data);
+ player_.OnDemuxerDataAvailable(CreateAbortedAck(true));
EXPECT_TRUE(GetMediaDecoderJob(true)->is_decoding());
- // Wait for the decoder job to finish decoding.
- while(GetMediaDecoderJob(true)->is_decoding())
- message_loop_.RunUntilIdle();
+ WaitForAudioDecodeDone();
// No request will be sent for new data.
- EXPECT_EQ(1, demuxer_.num_requests());
+ EXPECT_EQ(1, demuxer_->num_data_requests());
+
+ // No seek requests should have occurred.
+ EXPECT_EQ(0, demuxer_->num_seek_requests());
}
TEST_F(MediaSourcePlayerTest, DemuxerDataArrivesAfterRelease) {
- if (!MediaCodecBridge::IsAvailable()) {
- LOG(INFO) << "Could not run test - not supported on device.";
- return;
- }
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
// Test that the decoder should not crash if demuxer data arrives after
// Release().
- StartAudioDecoderJob();
- EXPECT_TRUE(player_.IsPlaying());
- EXPECT_EQ(1, demuxer_.num_requests());
- EXPECT_TRUE(NULL != GetMediaDecoderJob(true));
+ StartAudioDecoderJob(true);
- player_.Release();
+ ReleasePlayer();
player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForAudio(0));
// The decoder job should have been released.
EXPECT_FALSE(player_.IsPlaying());
- EXPECT_EQ(1, demuxer_.num_requests());
+
+ // No further data should have been requested.
+ EXPECT_EQ(1, demuxer_->num_data_requests());
+
+ // No seek requests should have occurred.
+ EXPECT_EQ(0, demuxer_->num_seek_requests());
+}
+
+TEST_F(MediaSourcePlayerTest, BrowserSeek_RegularSeekPendsBrowserSeekDone) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // Test that a browser seek, once started, delays a newly arrived regular
+ // SeekTo() request's demuxer seek until the browser seek is done.
+ BrowserSeekPlayer(false);
+
+ // Simulate renderer requesting a regular seek while browser seek in progress.
+ player_.SeekTo(base::TimeDelta());
+ EXPECT_FALSE(GetMediaDecoderJob(false));
+
+ // Simulate browser seek is done. Confirm player requests the regular seek,
+ // still has no video decoder job configured, and has not requested any
+ // further data since the surface change event became pending in
+ // BrowserSeekPlayer().
+ EXPECT_EQ(1, demuxer_->num_seek_requests());
+ player_.OnDemuxerSeekDone(base::TimeDelta());
+ EXPECT_FALSE(GetMediaDecoderJob(false));
+ EXPECT_EQ(2, demuxer_->num_seek_requests());
+ EXPECT_EQ(1, demuxer_->num_browser_seek_requests());
+ EXPECT_EQ(1, demuxer_->num_data_requests());
+
+ // Simulate regular seek is done and confirm player requests more data for
+ // new video decoder job.
+ player_.OnDemuxerSeekDone(kNoTimestamp());
+ EXPECT_TRUE(GetMediaDecoderJob(false));
+ EXPECT_EQ(2, demuxer_->num_data_requests());
+ EXPECT_EQ(2, demuxer_->num_seek_requests());
+}
+
+TEST_F(MediaSourcePlayerTest, NoSeekForInitialReleaseAndStart) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // Test that no seek is requested if player Release() + Start() occurs prior
+ // to receiving any data.
+ CreateNextTextureAndSetVideoSurface();
+ StartVideoDecoderJob(true);
+ ReleasePlayer();
+
+ // Pass a new non-empty surface.
+ CreateNextTextureAndSetVideoSurface();
+
+ player_.Start();
+
+ // TODO(wolenetz/qinmin): Multiple in-flight data requests for same stream
+ // should be prevented. See http://crbug.com/306314.
+ EXPECT_EQ(2, demuxer_->num_data_requests());
+ EXPECT_TRUE(GetMediaDecoderJob(false));
+
+ EXPECT_EQ(0, demuxer_->num_seek_requests());
+}
+
+TEST_F(MediaSourcePlayerTest, BrowserSeek_MidStreamReleaseAndStart) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // Test that one browser seek is requested if player Release() + Start(), with
+ // video data received between Release() and Start().
+ BrowserSeekPlayer(true);
+ EXPECT_EQ(1, demuxer_->num_data_requests());
+
+ // Simulate browser seek is done and confirm player requests more data.
+ player_.OnDemuxerSeekDone(base::TimeDelta());
+ EXPECT_TRUE(GetMediaDecoderJob(false));
+ EXPECT_EQ(2, demuxer_->num_data_requests());
+ EXPECT_EQ(1, demuxer_->num_seek_requests());
+}
+
+TEST_F(MediaSourcePlayerTest, PrerollAudioAfterSeek) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // Test decoder job will preroll the media to the seek position.
+ StartAudioDecoderJob(true);
+
+ SeekPlayerWithAbort(true, base::TimeDelta::FromMilliseconds(100));
+ EXPECT_TRUE(IsPrerolling(true));
+ EXPECT_EQ(100.0, GetPrerollTimestamp().InMillisecondsF());
+
+ // Send some data before the seek position.
+ for (int i = 1; i < 4; ++i) {
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForAudio(i));
+ EXPECT_TRUE(GetMediaDecoderJob(true)->is_decoding());
+ message_loop_.Run();
+ }
+ EXPECT_EQ(100.0, player_.GetCurrentTime().InMillisecondsF());
+ EXPECT_TRUE(IsPrerolling(true));
+
+ // Send data after the seek position.
+ DemuxerData data = CreateReadFromDemuxerAckForAudio(3);
+ data.access_units[0].timestamp = base::TimeDelta::FromMilliseconds(100);
+ player_.OnDemuxerDataAvailable(data);
+ message_loop_.Run();
+ EXPECT_LT(100.0, player_.GetCurrentTime().InMillisecondsF());
+ EXPECT_FALSE(IsPrerolling(true));
+}
+
+TEST_F(MediaSourcePlayerTest, PrerollVideoAfterSeek) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // Test decoder job will preroll the media to the seek position.
+ CreateNextTextureAndSetVideoSurface();
+ StartVideoDecoderJob(true);
+
+ SeekPlayerWithAbort(false, base::TimeDelta::FromMilliseconds(100));
+ EXPECT_TRUE(IsPrerolling(false));
+ EXPECT_EQ(100.0, GetPrerollTimestamp().InMillisecondsF());
+
+ // Send some data before the seek position.
+ DemuxerData data;
+ for (int i = 1; i < 4; ++i) {
+ data = CreateReadFromDemuxerAckForVideo();
+ data.access_units[0].timestamp = base::TimeDelta::FromMilliseconds(i * 30);
+ player_.OnDemuxerDataAvailable(data);
+ EXPECT_TRUE(GetMediaDecoderJob(false)->is_decoding());
+ message_loop_.Run();
+ }
+ EXPECT_EQ(100.0, player_.GetCurrentTime().InMillisecondsF());
+ EXPECT_TRUE(IsPrerolling(false));
+
+ // Send data at the seek position.
+ data = CreateReadFromDemuxerAckForVideo();
+ data.access_units[0].timestamp = base::TimeDelta::FromMilliseconds(100);
+ player_.OnDemuxerDataAvailable(data);
+ message_loop_.Run();
+
+ // TODO(wolenetz/qinmin): Player's maintenance of current time for video-only
+ // streams depends on decoder output, which may be initially inaccurate, and
+ // encoded video test data may also need updating. Verify at least that AU
+ // timestamp-based preroll logic has determined video preroll has completed.
+ // See http://crbug.com/310823 and http://b/11356652.
+ EXPECT_FALSE(IsPrerolling(false));
+}
+
+TEST_F(MediaSourcePlayerTest, SeekingAfterCompletingPrerollRestartsPreroll) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // Test decoder job will begin prerolling upon seek, when it was not
+ // prerolling prior to the seek.
+ StartAudioDecoderJob(true);
+ MediaDecoderJob* decoder_job = GetMediaDecoderJob(true);
+ EXPECT_TRUE(IsPrerolling(true));
+
+ // Complete the initial preroll by feeding data to the decoder.
+ for (int i = 0; i < 4; ++i) {
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForAudio(i));
+ EXPECT_TRUE(GetMediaDecoderJob(true)->is_decoding());
+ message_loop_.Run();
+ }
+ EXPECT_LT(0.0, player_.GetCurrentTime().InMillisecondsF());
+ EXPECT_FALSE(IsPrerolling(true));
+
+ SeekPlayerWithAbort(true, base::TimeDelta::FromMilliseconds(500));
+
+ // Prerolling should have begun again.
+ EXPECT_TRUE(IsPrerolling(true));
+ EXPECT_EQ(500.0, GetPrerollTimestamp().InMillisecondsF());
+
+ // Send data at and after the seek position. Prerolling should complete.
+ for (int i = 0; i < 4; ++i) {
+ DemuxerData data = CreateReadFromDemuxerAckForAudio(i);
+ data.access_units[0].timestamp = base::TimeDelta::FromMilliseconds(
+ 500 + 30 * (i - 1));
+ player_.OnDemuxerDataAvailable(data);
+ EXPECT_TRUE(GetMediaDecoderJob(true)->is_decoding());
+ message_loop_.Run();
+ }
+ EXPECT_LT(500.0, player_.GetCurrentTime().InMillisecondsF());
+ EXPECT_FALSE(IsPrerolling(true));
+
+ // Throughout this test, we should have not re-created the decoder job, so
+ // IsPrerolling() transition from false to true was not due to constructor
+ // initialization. It was due to BeginPrerolling().
+ EXPECT_EQ(decoder_job, GetMediaDecoderJob(true));
+}
+
+TEST_F(MediaSourcePlayerTest, PrerollContinuesAcrossReleaseAndStart) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // Test decoder job will resume media prerolling if interrupted by Release()
+ // and Start().
+ StartAudioDecoderJob(true);
+
+ SeekPlayerWithAbort(true, base::TimeDelta::FromMilliseconds(100));
+ EXPECT_TRUE(IsPrerolling(true));
+ EXPECT_EQ(100.0, GetPrerollTimestamp().InMillisecondsF());
+
+ // Send some data before the seek position.
+ // Test uses 'large' number of iterations because decoder job may not get
+ // MEDIA_CODEC_OK output status until after a few dequeue output attempts.
+ // This allows decoder status to stabilize prior to AU timestamp reaching
+ // the preroll target.
+ DemuxerData data;
+ for (int i = 0; i < 10; ++i) {
+ data = CreateReadFromDemuxerAckForAudio(3);
+ data.access_units[0].timestamp = base::TimeDelta::FromMilliseconds(i * 10);
+ if (i == 1) {
+ // While still prerolling, Release() and Start() the player.
+ // TODO(qinmin): Simulation of multiple in-flight data requests (one from
+ // before Release(), one from after Start()) is not included here, and
+ // neither is any data enqueued for later decode if it arrives after
+ // Release() and before Start(). See http://crbug.com/306314. Assumption
+ // for this test, to prevent flakiness until the bug is fixed, is the
+ // first request's data arrives before Start(). Though that data is not
+ // seen by decoder, this assumption allows preroll continuation
+ // verification and prevents multiple in-flight data requests.
+ ReleasePlayer();
+ player_.OnDemuxerDataAvailable(data);
+ message_loop_.RunUntilIdle();
+ EXPECT_FALSE(GetMediaDecoderJob(true));
+ StartAudioDecoderJob(true);
+ } else {
+ player_.OnDemuxerDataAvailable(data);
+ EXPECT_TRUE(GetMediaDecoderJob(true)->is_decoding());
+ message_loop_.Run();
+ }
+ EXPECT_TRUE(IsPrerolling(true));
+ }
+ EXPECT_EQ(100.0, player_.GetCurrentTime().InMillisecondsF());
+ EXPECT_TRUE(IsPrerolling(true));
+
+ // Send data after the seek position.
+ data = CreateReadFromDemuxerAckForAudio(3);
+ data.access_units[0].timestamp = base::TimeDelta::FromMilliseconds(100);
+ player_.OnDemuxerDataAvailable(data);
+ message_loop_.Run();
+ EXPECT_LT(100.0, player_.GetCurrentTime().InMillisecondsF());
+ EXPECT_FALSE(IsPrerolling(true));
+}
+
+TEST_F(MediaSourcePlayerTest, PrerollContinuesAcrossConfigChange) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // Test decoder job will resume media prerolling if interrupted by
+ // |kConfigChanged| and OnDemuxerConfigsAvailable().
+ StartAudioDecoderJob(true);
+
+ SeekPlayerWithAbort(true, base::TimeDelta::FromMilliseconds(100));
+ EXPECT_TRUE(IsPrerolling(true));
+ EXPECT_EQ(100.0, GetPrerollTimestamp().InMillisecondsF());
+
+ // In response to data request, simulate that demuxer signals config change by
+ // sending an AU with |kConfigChanged|. Player should prepare to reconfigure
+ // the audio decoder job, and should request new demuxer configs.
+ DemuxerData data = CreateReadFromDemuxerAckWithConfigChanged(true, 0);
+ EXPECT_EQ(0, demuxer_->num_config_requests());
+ player_.OnDemuxerDataAvailable(data);
+ EXPECT_EQ(1, demuxer_->num_config_requests());
+
+ // Simulate arrival of new configs.
+ player_.OnDemuxerConfigsAvailable(CreateAudioDemuxerConfigs(kCodecVorbis));
+
+ // Send some data before the seek position.
+ for (int i = 1; i < 4; ++i) {
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForAudio(i));
+ EXPECT_TRUE(GetMediaDecoderJob(true)->is_decoding());
+ message_loop_.Run();
+ }
+ EXPECT_EQ(100.0, player_.GetCurrentTime().InMillisecondsF());
+ EXPECT_TRUE(IsPrerolling(true));
+
+ // Send data after the seek position.
+ data = CreateReadFromDemuxerAckForAudio(3);
+ data.access_units[0].timestamp = base::TimeDelta::FromMilliseconds(100);
+ player_.OnDemuxerDataAvailable(data);
+ message_loop_.Run();
+ EXPECT_LT(100.0, player_.GetCurrentTime().InMillisecondsF());
+ EXPECT_FALSE(IsPrerolling(true));
+}
+
+TEST_F(MediaSourcePlayerTest, SimultaneousAudioVideoConfigChange) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // Test that the player allows simultaneous audio and video config change,
+ // such as might occur during OnPrefetchDone() if next access unit for both
+ // audio and video jobs is |kConfigChanged|.
+ CreateNextTextureAndSetVideoSurface();
+ Start(CreateAudioVideoDemuxerConfigs(), true);
+ MediaDecoderJob* first_audio_job = GetMediaDecoderJob(true);
+ MediaDecoderJob* first_video_job = GetMediaDecoderJob(false);
+
+ // Simulate audio |kConfigChanged| prefetched as standalone access unit.
+ player_.OnDemuxerDataAvailable(
+ CreateReadFromDemuxerAckWithConfigChanged(true, 0));
+ EXPECT_EQ(0, demuxer_->num_config_requests()); // No OnPrefetchDone() yet.
+
+ // Simulate video |kConfigChanged| prefetched as standalone access unit.
+ player_.OnDemuxerDataAvailable(
+ CreateReadFromDemuxerAckWithConfigChanged(false, 0));
+ EXPECT_EQ(1, demuxer_->num_config_requests()); // OnPrefetchDone() occurred.
+ EXPECT_EQ(2, demuxer_->num_data_requests()); // No more data requested yet.
+
+ // No job re-creation should occur until the requested configs arrive.
+ EXPECT_EQ(first_audio_job, GetMediaDecoderJob(true));
+ EXPECT_EQ(first_video_job, GetMediaDecoderJob(false));
+
+ player_.OnDemuxerConfigsAvailable(CreateAudioVideoDemuxerConfigs());
+ EXPECT_EQ(4, demuxer_->num_data_requests());
+ MediaDecoderJob* second_audio_job = GetMediaDecoderJob(true);
+ MediaDecoderJob* second_video_job = GetMediaDecoderJob(false);
+ EXPECT_NE(first_audio_job, second_audio_job);
+ EXPECT_NE(first_video_job, second_video_job);
+ EXPECT_TRUE(second_audio_job && second_video_job);
+
+ // Confirm no further demuxer configs requested.
+ EXPECT_EQ(1, demuxer_->num_config_requests());
+}
+
+TEST_F(MediaSourcePlayerTest, DemuxerConfigRequestedIfInPrefetchUnit0) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // Test that the player detects need for and requests demuxer configs if
+ // the |kConfigChanged| unit is the very first unit in the set of units
+ // received in OnDemuxerDataAvailable() ostensibly while
+ // |PREFETCH_DONE_EVENT_PENDING|.
+ StartConfigChange(true, true, 0);
+}
+
+TEST_F(MediaSourcePlayerTest, DemuxerConfigRequestedIfInPrefetchUnit1) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // Test that the player detects need for and requests demuxer configs if
+ // the |kConfigChanged| unit is not the first unit in the set of units
+ // received in OnDemuxerDataAvailable() ostensibly while
+ // |PREFETCH_DONE_EVENT_PENDING|.
+ StartConfigChange(true, true, 1);
+}
+
+TEST_F(MediaSourcePlayerTest, DemuxerConfigRequestedIfInUnit0AfterPrefetch) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // Test that the player detects need for and requests demuxer configs if
+ // the |kConfigChanged| unit is the very first unit in the set of units
+ // received in OnDemuxerDataAvailable() from data requested ostensibly while
+ // not prefetching.
+ StartConfigChange(true, false, 0);
+}
+
+TEST_F(MediaSourcePlayerTest, DemuxerConfigRequestedIfInUnit1AfterPrefetch) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // Test that the player detects need for and requests demuxer configs if
+ // the |kConfigChanged| unit is not the first unit in the set of units
+ // received in OnDemuxerDataAvailable() from data requested ostensibly while
+ // not prefetching.
+ StartConfigChange(true, false, 1);
+}
+
+TEST_F(MediaSourcePlayerTest, BrowserSeek_PrerollAfterBrowserSeek) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // Test decoder job will preroll the media to the actual seek position
+ // resulting from a browser seek.
+ BrowserSeekPlayer(false);
+
+ // Simulate browser seek is done, but to a later time than was requested.
+ EXPECT_LT(player_.GetCurrentTime().InMillisecondsF(), 100);
+ player_.OnDemuxerSeekDone(base::TimeDelta::FromMilliseconds(100));
+ EXPECT_TRUE(GetMediaDecoderJob(false));
+ EXPECT_EQ(100.0, player_.GetCurrentTime().InMillisecondsF());
+ EXPECT_EQ(100.0, GetPrerollTimestamp().InMillisecondsF());
+ EXPECT_EQ(2, demuxer_->num_data_requests());
+
+ // Send some data with access unit timestamps before the actual browser seek
+ // position. This is a bit unrealistic in this case where the browser seek
+ // jumped forward and next data from demuxer would normally begin at this
+ // browser seek position, immediately completing preroll. For simplicity and
+ // coverage, this test simulates the more common condition that AUs received
+ // after browser seek begin with timestamps before the seek target, and don't
+ // immediately complete preroll.
+ DemuxerData data;
+ for (int i = 1; i < 4; ++i) {
+ data = CreateReadFromDemuxerAckForVideo();
+ data.access_units[0].timestamp = base::TimeDelta::FromMilliseconds(i * 30);
+ player_.OnDemuxerDataAvailable(data);
+ EXPECT_TRUE(GetMediaDecoderJob(false)->is_decoding());
+ message_loop_.Run();
+ EXPECT_TRUE(IsPrerolling(false));
+ }
+
+ EXPECT_EQ(100.0, player_.GetCurrentTime().InMillisecondsF());
+
+ // Send data after the browser seek position.
+ data = CreateReadFromDemuxerAckForVideo();
+ data.access_units[0].timestamp = base::TimeDelta::FromMilliseconds(120);
+ player_.OnDemuxerDataAvailable(data);
+ message_loop_.Run();
+ EXPECT_FALSE(IsPrerolling(false));
+}
+
+TEST_F(MediaSourcePlayerTest, VideoDemuxerConfigChange) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // Test that video config change notification results in request for demuxer
+ // configuration, and that a video decoder job results without any browser
+ // seek necessary once the new demuxer config arrives.
+ StartConfigChange(false, true, 1);
+ MediaDecoderJob* first_job = GetMediaDecoderJob(false);
+ EXPECT_TRUE(first_job);
+ EXPECT_EQ(1, demuxer_->num_data_requests());
+ EXPECT_EQ(1, demuxer_->num_config_requests());
+
+ // Simulate arrival of new configs.
+ player_.OnDemuxerConfigsAvailable(CreateVideoDemuxerConfigs());
+
+ // New video decoder job should have been created and configured, without any
+ // browser seek.
+ MediaDecoderJob* second_job = GetMediaDecoderJob(false);
+ EXPECT_TRUE(second_job);
+ EXPECT_NE(first_job, second_job);
+ EXPECT_EQ(2, demuxer_->num_data_requests());
+ EXPECT_EQ(1, demuxer_->num_config_requests());
+ EXPECT_EQ(0, demuxer_->num_seek_requests());
+}
+
+TEST_F(MediaSourcePlayerTest, VideoConfigChangeContinuesAcrossSeek) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // Test if a demuxer config request is pending (due to previously receiving
+ // |kConfigChanged|), and a seek request arrives prior to demuxer configs,
+ // then seek is processed first, followed by the decoder config change.
+ // This assumes the demuxer sends |kConfigChanged| read response prior to
+ // canceling any reads pending seek; no |kAborted| is involved in this test.
+ StartConfigChange(false, false, 1);
+ MediaDecoderJob* first_job = GetMediaDecoderJob(false);
+ EXPECT_TRUE(first_job);
+ EXPECT_EQ(1, demuxer_->num_config_requests());
+ EXPECT_EQ(2, demuxer_->num_data_requests());
+ EXPECT_EQ(0, demuxer_->num_seek_requests());
+
+ player_.SeekTo(base::TimeDelta::FromMilliseconds(100));
+
+ // Verify that the seek is requested immediately.
+ EXPECT_EQ(1, demuxer_->num_seek_requests());
+
+ // Simulate unlikely delayed arrival of the demuxer configs, completing the
+ // config change.
+ // TODO(wolenetz): Is it even possible for requested demuxer configs to be
+ // delayed until after a SeekTo request arrives?
+ player_.OnDemuxerConfigsAvailable(CreateVideoDemuxerConfigs());
+
+ MediaDecoderJob* second_job = GetMediaDecoderJob(false);
+ EXPECT_NE(first_job, second_job);
+ EXPECT_TRUE(second_job);
+
+ // Send back the seek done notification. This should finish the seek and
+ // trigger the player to request more data.
+ EXPECT_EQ(2, demuxer_->num_data_requests());
+ player_.OnDemuxerSeekDone(kNoTimestamp());
+ EXPECT_EQ(3, demuxer_->num_data_requests());
+}
+
+TEST_F(MediaSourcePlayerTest, NewSurfaceWhileChangingConfigs) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // Test that no seek or duplicated demuxer config request results from a
+ // SetVideoSurface() that occurs while the player is expecting new demuxer
+ // configs. This test may be good to keep beyond browser seek hack.
+ StartConfigChange(false, false, 1);
+ MediaDecoderJob* first_job = GetMediaDecoderJob(false);
+ EXPECT_TRUE(first_job);
+ EXPECT_EQ(1, demuxer_->num_config_requests());
+ EXPECT_EQ(2, demuxer_->num_data_requests());
+
+ CreateNextTextureAndSetVideoSurface();
+
+ // Surface change processing (including decoder job re-creation) should
+ // not occur until the pending video config change is completed.
+ EXPECT_EQ(first_job, GetMediaDecoderJob(false));
+
+ player_.OnDemuxerConfigsAvailable(CreateVideoDemuxerConfigs());
+ MediaDecoderJob* second_job = GetMediaDecoderJob(false);
+ EXPECT_NE(first_job, second_job);
+ EXPECT_TRUE(second_job);
+
+ EXPECT_EQ(3, demuxer_->num_data_requests());
+ EXPECT_EQ(1, demuxer_->num_config_requests());
+ EXPECT_EQ(0, demuxer_->num_seek_requests());
+}
+
+TEST_F(MediaSourcePlayerTest,
+ BrowserSeek_DecoderStarvationWhilePendingSurfaceChange) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // Test video decoder starvation while handling a pending surface change
+ // should not cause any crashes.
+ CreateNextTextureAndSetVideoSurface();
+ StartVideoDecoderJob(true);
+ DemuxerData data = CreateReadFromDemuxerAckForVideo();
+ player_.OnDemuxerDataAvailable(data);
+
+ // Trigger a surface change and decoder starvation.
+ CreateNextTextureAndSetVideoSurface();
+ TriggerPlayerStarvation();
+ WaitForVideoDecodeDone();
+
+ // Surface change should trigger a seek.
+ EXPECT_EQ(1, demuxer_->num_browser_seek_requests());
+ player_.OnDemuxerSeekDone(base::TimeDelta());
+ EXPECT_TRUE(GetMediaDecoderJob(false));
+
+ // A new data request should be sent.
+ EXPECT_EQ(2, demuxer_->num_data_requests());
+}
+
+TEST_F(MediaSourcePlayerTest, ReleaseWithOnPrefetchDoneAlreadyPosted) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // Test if OnPrefetchDone() had already been posted before and is executed
+ // after Release(), then player does not DCHECK. This test is fragile to
+ // change to MediaDecoderJob::Prefetch() implementation; it assumes task
+ // is posted to run |prefetch_cb| if the job already HasData().
+ // TODO(wolenetz): Remove MSP::set_decode_callback_for_testing() if this test
+ // becomes obsolete. See http://crbug.com/304234.
+ StartAudioDecoderJob(true);
+
+ // Escape the original prefetch by decoding a single access unit.
+ player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForAudio(0));
+ message_loop_.Run();
+
+ // Prime the job with a few more access units, so that a later prefetch,
+ // triggered by starvation to simulate decoder underrun, can trivially
+ // post task to run OnPrefetchDone().
+ player_.OnDemuxerDataAvailable(
+ CreateReadFromDemuxerAckWithConfigChanged(true, 4));
+ EXPECT_TRUE(GetMediaDecoderJob(true)->is_decoding());
+
+ // Simulate decoder underrun, so trivial prefetch starts while still decoding.
+ // The prefetch and posting of OnPrefetchDone() will not occur until next
+  // MediaDecoderCallback() occurs.
+ TriggerPlayerStarvation();
+
+ // Upon the next successful decode callback, post a task to call Release() on
+ // the |player_|, such that the trivial OnPrefetchDone() task posting also
+ // occurs and should execute after the Release().
+ OnNextTestDecodeCallbackPostTaskToReleasePlayer();
+
+ while (GetMediaDecoderJob(true))
+ message_loop_.RunUntilIdle();
+ EXPECT_TRUE(decoder_callback_hook_executed_);
+ EXPECT_EQ(2, demuxer_->num_data_requests());
+
+ // Player should have no decoder job until after Start().
+ StartAudioDecoderJob(true);
+}
+
+TEST_F(MediaSourcePlayerTest, SeekToThenReleaseThenDemuxerSeekAndDone) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // Test if Release() occurs after SeekTo(), but the DemuxerSeek IPC request
+ // has not yet been sent, then the seek request is sent after Release(). Also,
+ // test if OnDemuxerSeekDone() occurs prior to next Start(), then the player
+ // will resume correct post-seek preroll upon Start().
+ StartAudioDecoderJobAndSeekToWhileDecoding(
+ base::TimeDelta::FromMilliseconds(100));
+ ReleasePlayer();
+ EXPECT_EQ(1, demuxer_->num_seek_requests());
+
+ player_.OnDemuxerSeekDone(kNoTimestamp());
+ EXPECT_EQ(100.0, GetPrerollTimestamp().InMillisecondsF());
+ EXPECT_FALSE(GetMediaDecoderJob(true));
+ EXPECT_FALSE(player_.IsPlaying());
+
+ // Player should begin prefetch and resume preroll upon Start().
+ EXPECT_EQ(1, demuxer_->num_data_requests());
+ StartAudioDecoderJob(true);
+ EXPECT_TRUE(IsPrerolling(true));
+ EXPECT_EQ(100.0, GetPrerollTimestamp().InMillisecondsF());
+
+ // No further seek should have been requested since Release(), above.
+ EXPECT_EQ(1, demuxer_->num_seek_requests());
+}
+
+TEST_F(MediaSourcePlayerTest, SeekToThenReleaseThenDemuxerSeekThenStart) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // Test if Release() occurs after SeekTo(), but the DemuxerSeek IPC request
+ // has not yet been sent, then the seek request is sent after Release(). Also,
+ // test if OnDemuxerSeekDone() does not occur until after the next Start(),
+ // then the player remains pending seek done until (and resumes correct
+ // post-seek preroll after) OnDemuxerSeekDone().
+ StartAudioDecoderJobAndSeekToWhileDecoding(
+ base::TimeDelta::FromMilliseconds(100));
+ ReleasePlayer();
+ EXPECT_EQ(1, demuxer_->num_seek_requests());
+
+ // Player should not prefetch upon Start() nor create the decoder job, due to
+ // awaiting DemuxerSeekDone.
+ EXPECT_EQ(1, demuxer_->num_data_requests());
+ StartAudioDecoderJob(false);
+
+ player_.OnDemuxerSeekDone(kNoTimestamp());
+ EXPECT_TRUE(GetMediaDecoderJob(true));
+ EXPECT_TRUE(IsPrerolling(true));
+ EXPECT_EQ(100.0, GetPrerollTimestamp().InMillisecondsF());
+ EXPECT_EQ(2, demuxer_->num_data_requests());
+
+ // No further seek should have been requested since Release(), above.
+ EXPECT_EQ(1, demuxer_->num_seek_requests());
+}
+
+TEST_F(MediaSourcePlayerTest, SeekToThenDemuxerSeekThenReleaseThenSeekDone) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // Test if Release() occurs after a SeekTo()'s subsequent DemuxerSeek IPC
+ // request and OnDemuxerSeekDone() arrives prior to the next Start(), then the
+ // player will resume correct post-seek preroll upon Start().
+ StartAudioDecoderJobAndSeekToWhileDecoding(
+ base::TimeDelta::FromMilliseconds(100));
+ WaitForAudioDecodeDone();
+ EXPECT_EQ(1, demuxer_->num_seek_requests());
+
+ ReleasePlayer();
+ player_.OnDemuxerSeekDone(kNoTimestamp());
+ EXPECT_FALSE(player_.IsPlaying());
+ EXPECT_FALSE(GetMediaDecoderJob(true));
+ EXPECT_EQ(100.0, GetPrerollTimestamp().InMillisecondsF());
+
+ // Player should begin prefetch and resume preroll upon Start().
+ EXPECT_EQ(1, demuxer_->num_data_requests());
+ StartAudioDecoderJob(true);
+ EXPECT_TRUE(IsPrerolling(true));
+ EXPECT_EQ(100.0, GetPrerollTimestamp().InMillisecondsF());
+
+ // No further seek should have been requested since before Release(), above.
+ EXPECT_EQ(1, demuxer_->num_seek_requests());
+}
+
+TEST_F(MediaSourcePlayerTest, SeekToThenReleaseThenStart) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+  // Test if Release() occurs after a SeekTo()'s subsequent DemuxerSeek IPC
+ // request and OnDemuxerSeekDone() does not occur until after the next
+ // Start(), then the player remains pending seek done until (and resumes
+ // correct post-seek preroll after) OnDemuxerSeekDone().
+ StartAudioDecoderJobAndSeekToWhileDecoding(
+ base::TimeDelta::FromMilliseconds(100));
+ WaitForAudioDecodeDone();
+ EXPECT_EQ(1, demuxer_->num_seek_requests());
+
+ ReleasePlayer();
+ EXPECT_EQ(1, demuxer_->num_data_requests());
+ StartAudioDecoderJob(false);
+
+ player_.OnDemuxerSeekDone(kNoTimestamp());
+ EXPECT_TRUE(GetMediaDecoderJob(true));
+ EXPECT_TRUE(IsPrerolling(true));
+ EXPECT_EQ(100.0, GetPrerollTimestamp().InMillisecondsF());
+ EXPECT_EQ(2, demuxer_->num_data_requests());
+
+ // No further seek should have been requested since before Release(), above.
+ EXPECT_EQ(1, demuxer_->num_seek_requests());
+}
+
+TEST_F(MediaSourcePlayerTest, ConfigChangedThenReleaseThenConfigsAvailable) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // Test if Release() occurs after |kConfigChanged| detected, new configs
+ // requested of demuxer, and the requested configs arrive before the next
+ // Start(), then the player completes the pending config change processing on
+ // their receipt.
+ StartConfigChange(true, true, 0);
+ ReleasePlayer();
+
+ player_.OnDemuxerConfigsAvailable(CreateAudioDemuxerConfigs(kCodecVorbis));
+ EXPECT_FALSE(GetMediaDecoderJob(true));
+ EXPECT_FALSE(player_.IsPlaying());
+ EXPECT_EQ(1, demuxer_->num_data_requests());
+
+ // Player should resume upon Start(), even without further configs supplied.
+ player_.Start();
+ EXPECT_TRUE(GetMediaDecoderJob(true));
+ EXPECT_TRUE(player_.IsPlaying());
+ EXPECT_EQ(2, demuxer_->num_data_requests());
+
+ // No further config request should have occurred since StartConfigChange().
+ EXPECT_EQ(1, demuxer_->num_config_requests());
+}
+
+TEST_F(MediaSourcePlayerTest, ConfigChangedThenReleaseThenStart) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // Test if Release() occurs after |kConfigChanged| detected, new configs
+ // requested of demuxer, and the requested configs arrive after the next
+ // Start(), then the player pends job creation until the new configs arrive.
+ StartConfigChange(true, true, 0);
+ ReleasePlayer();
+
+ player_.Start();
+ EXPECT_TRUE(player_.IsPlaying());
+ EXPECT_FALSE(GetMediaDecoderJob(true));
+ EXPECT_EQ(1, demuxer_->num_data_requests());
+
+ player_.OnDemuxerConfigsAvailable(CreateAudioDemuxerConfigs(kCodecVorbis));
+ EXPECT_TRUE(GetMediaDecoderJob(true));
+ EXPECT_EQ(2, demuxer_->num_data_requests());
+
+ // No further config request should have occurred since StartConfigChange().
+ EXPECT_EQ(1, demuxer_->num_config_requests());
+}
+
+TEST_F(MediaSourcePlayerTest, BrowserSeek_ThenReleaseThenDemuxerSeekDone) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // Test that Release() after a browser seek's DemuxerSeek IPC request has been
+ // sent behaves similar to a regular seek: if OnDemuxerSeekDone() occurs
+ // before the next Start()+SetVideoSurface(), then the player will resume
+ // correct post-seek preroll upon Start()+SetVideoSurface().
+ BrowserSeekPlayer(false);
+ base::TimeDelta expected_preroll_timestamp = player_.GetCurrentTime();
+ ReleasePlayer();
+
+ player_.OnDemuxerSeekDone(expected_preroll_timestamp);
+ EXPECT_FALSE(player_.IsPlaying());
+ EXPECT_FALSE(GetMediaDecoderJob(false));
+ EXPECT_EQ(expected_preroll_timestamp, GetPrerollTimestamp());
+
+ // Player should begin prefetch and resume preroll upon Start().
+ EXPECT_EQ(1, demuxer_->num_data_requests());
+ CreateNextTextureAndSetVideoSurface();
+ StartVideoDecoderJob(true);
+ EXPECT_TRUE(IsPrerolling(false));
+ EXPECT_EQ(expected_preroll_timestamp, GetPrerollTimestamp());
+ EXPECT_EQ(expected_preroll_timestamp, player_.GetCurrentTime());
+
+ // No further seek should have been requested since BrowserSeekPlayer().
+ EXPECT_EQ(1, demuxer_->num_seek_requests());
+}
+
+TEST_F(MediaSourcePlayerTest, BrowserSeek_ThenReleaseThenStart) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // Test that Release() after a browser seek's DemuxerSeek IPC request has been
+ // sent behaves similar to a regular seek: if OnDemuxerSeekDone() does not
+ // occur until after the next Start()+SetVideoSurface(), then the player
+ // remains pending seek done until (and resumes correct post-seek preroll
+ // after) OnDemuxerSeekDone().
+ BrowserSeekPlayer(false);
+ base::TimeDelta expected_preroll_timestamp = player_.GetCurrentTime();
+ ReleasePlayer();
+
+ EXPECT_EQ(1, demuxer_->num_data_requests());
+ CreateNextTextureAndSetVideoSurface();
+ StartVideoDecoderJob(false);
+
+ player_.OnDemuxerSeekDone(expected_preroll_timestamp);
+ EXPECT_TRUE(GetMediaDecoderJob(false));
+ EXPECT_TRUE(IsPrerolling(false));
+ EXPECT_EQ(expected_preroll_timestamp, GetPrerollTimestamp());
+ EXPECT_EQ(expected_preroll_timestamp, player_.GetCurrentTime());
+ EXPECT_EQ(2, demuxer_->num_data_requests());
+
+ // No further seek should have been requested since BrowserSeekPlayer().
+ EXPECT_EQ(1, demuxer_->num_seek_requests());
+}
+
+// TODO(xhwang): Once we add tests to cover DrmBridge, update this test to
+// also verify that the job is successfully created if SetDrmBridge(), Start()
+// and eventually OnMediaCrypto() occur. This would increase test coverage of
+// http://crbug.com/313470 and allow us to remove inspection of internal player
+// pending event state. See http://crbug.com/313860.
+TEST_F(MediaSourcePlayerTest, SurfaceChangeClearedEvenIfMediaCryptoAbsent) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // Test that |SURFACE_CHANGE_EVENT_PENDING| is not pending after
+ // SetVideoSurface() for a player configured for encrypted video, when the
+ // player has not yet received media crypto.
+ DemuxerConfigs configs = CreateVideoDemuxerConfigs();
+ configs.is_video_encrypted = true;
+
+ player_.OnDemuxerConfigsAvailable(configs);
+ CreateNextTextureAndSetVideoSurface();
+ EXPECT_FALSE(IsPendingSurfaceChange());
+ EXPECT_FALSE(GetMediaDecoderJob(false));
}
// TODO(xhwang): Enable this test when the test devices are updated.
TEST_F(MediaSourcePlayerTest, DISABLED_IsTypeSupported_Widevine) {
if (!MediaCodecBridge::IsAvailable() || !MediaDrmBridge::IsAvailable()) {
- LOG(INFO) << "Could not run test - not supported on device.";
+ VLOG(0) << "Could not run test - not supported on device.";
return;
}
@@ -693,7 +2099,7 @@ TEST_F(MediaSourcePlayerTest, DISABLED_IsTypeSupported_Widevine) {
TEST_F(MediaSourcePlayerTest, IsTypeSupported_InvalidUUID) {
if (!MediaCodecBridge::IsAvailable() || !MediaDrmBridge::IsAvailable()) {
- LOG(INFO) << "Could not run test - not supported on device.";
+ VLOG(0) << "Could not run test - not supported on device.";
return;
}
diff --git a/chromium/media/base/android/video_decoder_job.cc b/chromium/media/base/android/video_decoder_job.cc
index af89593362e..75124e7d0d7 100644
--- a/chromium/media/base/android/video_decoder_job.cc
+++ b/chromium/media/base/android/video_decoder_job.cc
@@ -30,9 +30,9 @@ VideoDecoderJob* VideoDecoderJob::Create(const VideoCodec video_codec,
jobject surface,
jobject media_crypto,
const base::Closure& request_data_cb) {
- scoped_ptr<VideoCodecBridge> codec(
- VideoCodecBridge::Create(video_codec, is_secure));
- if (codec && codec->Start(video_codec, size, surface, media_crypto))
+ scoped_ptr<VideoCodecBridge> codec(VideoCodecBridge::CreateDecoder(
+ video_codec, is_secure, size, surface, media_crypto));
+ if (codec)
return new VideoDecoderJob(codec.Pass(), request_data_cb);
LOG(ERROR) << "Failed to create VideoDecoderJob.";
@@ -51,14 +51,12 @@ VideoDecoderJob::~VideoDecoderJob() {
}
void VideoDecoderJob::ReleaseOutputBuffer(
- int outputBufferIndex, size_t size,
- const base::TimeDelta& presentation_timestamp,
- const MediaDecoderJob::DecoderCallback& callback,
- MediaCodecStatus status) {
- if (status != MEDIA_CODEC_OUTPUT_END_OF_STREAM || size != 0u)
- video_codec_bridge_->ReleaseOutputBuffer(outputBufferIndex, true);
-
- callback.Run(status, presentation_timestamp, 0);
+ int output_buffer_index,
+ size_t size,
+ bool render_output,
+ const ReleaseOutputCompletionCallback& callback) {
+ video_codec_bridge_->ReleaseOutputBuffer(output_buffer_index, render_output);
+ callback.Run(0u);
}
bool VideoDecoderJob::ComputeTimeToRender() const {
diff --git a/chromium/media/base/android/video_decoder_job.h b/chromium/media/base/android/video_decoder_job.h
index 27a3957c685..41c15edc39e 100644
--- a/chromium/media/base/android/video_decoder_job.h
+++ b/chromium/media/base/android/video_decoder_job.h
@@ -39,10 +39,10 @@ class VideoDecoderJob : public MediaDecoderJob {
// MediaDecoderJob implementation.
virtual void ReleaseOutputBuffer(
- int outputBufferIndex, size_t size,
- const base::TimeDelta& presentation_timestamp,
- const MediaDecoderJob::DecoderCallback& callback,
- MediaCodecStatus status) OVERRIDE;
+ int output_buffer_index,
+ size_t size,
+ bool render_output,
+ const ReleaseOutputCompletionCallback& callback) OVERRIDE;
virtual bool ComputeTimeToRender() const OVERRIDE;
diff --git a/chromium/media/base/audio_bus.cc b/chromium/media/base/audio_bus.cc
index 518d83cb4de..c1123471abc 100644
--- a/chromium/media/base/audio_bus.cc
+++ b/chromium/media/base/audio_bus.cc
@@ -5,6 +5,7 @@
#include "media/base/audio_bus.h"
#include "base/logging.h"
+#include "base/safe_numerics.h"
#include "media/audio/audio_parameters.h"
#include "media/base/limits.h"
#include "media/base/vector_math.h"
@@ -82,9 +83,10 @@ static void ToInterleavedInternal(const AudioBus* source, int start_frame,
}
}
-static void ValidateConfig(size_t channels, int frames) {
+static void ValidateConfig(int channels, int frames) {
CHECK_GT(frames, 0);
- CHECK_LE(channels, static_cast<size_t>(limits::kMaxChannels));
+ CHECK_GT(channels, 0);
+ CHECK_LE(channels, static_cast<int>(limits::kMaxChannels));
}
static void CheckOverflow(int start_frame, int frames, int total_frames) {
@@ -127,7 +129,8 @@ AudioBus::AudioBus(int frames, const std::vector<float*>& channel_data)
: channel_data_(channel_data),
frames_(frames),
can_set_channel_data_(false) {
- ValidateConfig(channel_data_.size(), frames_);
+ ValidateConfig(
+ base::checked_numeric_cast<int>(channel_data_.size()), frames_);
// Sanity check wrapped vector for alignment and channel count.
for (size_t i = 0; i < channel_data_.size(); ++i)
@@ -138,6 +141,7 @@ AudioBus::AudioBus(int channels)
: channel_data_(channels),
frames_(0),
can_set_channel_data_(true) {
+ CHECK_GT(channels, 0);
for (size_t i = 0; i < channel_data_.size(); ++i)
channel_data_[i] = NULL;
}
@@ -190,6 +194,7 @@ void AudioBus::SetChannelData(int channel, float* data) {
void AudioBus::set_frames(int frames) {
CHECK(can_set_channel_data_);
+ ValidateConfig(static_cast<int>(channel_data_.size()), frames);
frames_ = frames;
}
diff --git a/chromium/media/base/audio_bus.h b/chromium/media/base/audio_bus.h
index dbb49ca57fc..d1106f558ef 100644
--- a/chromium/media/base/audio_bus.h
+++ b/chromium/media/base/audio_bus.h
@@ -47,7 +47,6 @@ class MEDIA_EXPORT AudioBus {
static scoped_ptr<AudioBus> WrapMemory(int channels, int frames, void* data);
static scoped_ptr<AudioBus> WrapMemory(const AudioParameters& params,
void* data);
- // Returns the required memory size to use the WrapMemory() method.
static int CalculateMemorySize(const AudioParameters& params);
// Calculates the required size for an AudioBus given the number of channels
diff --git a/chromium/media/base/audio_bus_perftest.cc b/chromium/media/base/audio_bus_perftest.cc
new file mode 100644
index 00000000000..ae60531074e
--- /dev/null
+++ b/chromium/media/base/audio_bus_perftest.cc
@@ -0,0 +1,53 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/time/time.h"
+#include "media/base/audio_bus.h"
+#include "media/base/fake_audio_render_callback.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/perf/perf_test.h"
+
+namespace media {
+
+static const int kBenchmarkIterations = 20;
+
+template <typename T>
+void RunInterleaveBench(AudioBus* bus, const std::string& trace_name) {
+ const int frame_size = bus->frames() * bus->channels();
+ scoped_ptr<T> interleaved(new T[frame_size]);
+ const int byte_size = sizeof(*interleaved);
+
+ base::TimeTicks start = base::TimeTicks::HighResNow();
+ for (int i = 0; i < kBenchmarkIterations; ++i) {
+ bus->ToInterleaved(bus->frames(), byte_size, interleaved.get());
+ }
+ double total_time_milliseconds =
+ (base::TimeTicks::HighResNow() - start).InMillisecondsF();
+ perf_test::PrintResult(
+ "audio_bus_to_interleaved", "", trace_name,
+ total_time_milliseconds / kBenchmarkIterations, "ms", true);
+
+ start = base::TimeTicks::HighResNow();
+ for (int i = 0; i < kBenchmarkIterations; ++i) {
+ bus->FromInterleaved(interleaved.get(), bus->frames(), byte_size);
+ }
+ total_time_milliseconds =
+ (base::TimeTicks::HighResNow() - start).InMillisecondsF();
+ perf_test::PrintResult(
+ "audio_bus_from_interleaved", "", trace_name,
+ total_time_milliseconds / kBenchmarkIterations, "ms", true);
+}
+
+// Benchmark the FromInterleaved() and ToInterleaved() methods.
+TEST(AudioBusPerfTest, Interleave) {
+ scoped_ptr<AudioBus> bus = AudioBus::Create(2, 48000 * 120);
+ FakeAudioRenderCallback callback(0.2);
+ callback.Render(bus.get(), 0);
+
+ RunInterleaveBench<int8>(bus.get(), "int8");
+ RunInterleaveBench<int16>(bus.get(), "int16");
+ RunInterleaveBench<int32>(bus.get(), "int32");
+}
+
+} // namespace media
diff --git a/chromium/media/base/audio_bus_unittest.cc b/chromium/media/base/audio_bus_unittest.cc
index a82090bd8e8..e8c78a36b41 100644
--- a/chromium/media/base/audio_bus_unittest.cc
+++ b/chromium/media/base/audio_bus_unittest.cc
@@ -413,60 +413,4 @@ TEST_F(AudioBusTest, Scale) {
}
}
-// Benchmark the FromInterleaved() and ToInterleaved() methods.
-TEST_F(AudioBusTest, DISABLED_InterleaveBench) {
- scoped_ptr<AudioBus> bus = AudioBus::Create(2, 48000 * 120);
- const int frame_size = bus->frames() * bus->channels();
- FakeAudioRenderCallback callback(0.2);
- callback.Render(bus.get(), 0);
- {
- SCOPED_TRACE("uint8");
- scoped_ptr<uint8> interleaved(new uint8[frame_size]);
- const int byte_size = sizeof(*interleaved);
-
- base::TimeTicks start = base::TimeTicks::HighResNow();
- bus->ToInterleaved(bus->frames(), byte_size, interleaved.get());
- double total_time_ms =
- (base::TimeTicks::HighResNow() - start).InMillisecondsF();
- printf("ToInterleaved uint8 took %.2fms.\n", total_time_ms);
-
- start = base::TimeTicks::HighResNow();
- bus->FromInterleaved(interleaved.get(), bus->frames(), byte_size);
- total_time_ms = (base::TimeTicks::HighResNow() - start).InMillisecondsF();
- printf("FromInterleaved uint8 took %.2fms.\n", total_time_ms);
- }
- {
- SCOPED_TRACE("int16");
- scoped_ptr<int16> interleaved(new int16[frame_size]);
- const int byte_size = sizeof(*interleaved);
-
- base::TimeTicks start = base::TimeTicks::HighResNow();
- bus->ToInterleaved(bus->frames(), byte_size, interleaved.get());
- double total_time_ms =
- (base::TimeTicks::HighResNow() - start).InMillisecondsF();
- printf("ToInterleaved int16 took %.2fms.\n", total_time_ms);
-
- start = base::TimeTicks::HighResNow();
- bus->FromInterleaved(interleaved.get(), bus->frames(), byte_size);
- total_time_ms = (base::TimeTicks::HighResNow() - start).InMillisecondsF();
- printf("FromInterleaved int16 took %.2fms.\n", total_time_ms);
- }
- {
- SCOPED_TRACE("int32");
- scoped_ptr<int32> interleaved(new int32[frame_size]);
- const int byte_size = sizeof(*interleaved);
-
- base::TimeTicks start = base::TimeTicks::HighResNow();
- bus->ToInterleaved(bus->frames(), byte_size, interleaved.get());
- double total_time_ms =
- (base::TimeTicks::HighResNow() - start).InMillisecondsF();
- printf("ToInterleaved int32 took %.2fms.\n", total_time_ms);
-
- start = base::TimeTicks::HighResNow();
- bus->FromInterleaved(interleaved.get(), bus->frames(), byte_size);
- total_time_ms = (base::TimeTicks::HighResNow() - start).InMillisecondsF();
- printf("FromInterleaved int32 took %.2fms.\n", total_time_ms);
- }
-}
-
} // namespace media
diff --git a/chromium/media/base/audio_converter.cc b/chromium/media/base/audio_converter.cc
index ac82e314ba3..d0c45136dad 100644
--- a/chromium/media/base/audio_converter.cc
+++ b/chromium/media/base/audio_converter.cc
@@ -98,10 +98,8 @@ AudioConverter::AudioConverter(const AudioParameters& input_params,
AudioConverter::~AudioConverter() {}
void AudioConverter::AddInput(InputCallback* input) {
- // TODO(dalecurtis): Speculative CHECK for http://crbug.com/233026, should be
- // converted to a DCHECK once resolved.
- CHECK(std::find(transform_inputs_.begin(), transform_inputs_.end(), input) ==
- transform_inputs_.end());
+ DCHECK(std::find(transform_inputs_.begin(), transform_inputs_.end(), input) ==
+ transform_inputs_.end());
transform_inputs_.push_back(input);
}
diff --git a/chromium/media/base/audio_converter_perftest.cc b/chromium/media/base/audio_converter_perftest.cc
new file mode 100644
index 00000000000..83f715e1100
--- /dev/null
+++ b/chromium/media/base/audio_converter_perftest.cc
@@ -0,0 +1,79 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/time/time.h"
+#include "media/base/audio_converter.h"
+#include "media/base/fake_audio_render_callback.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/perf/perf_test.h"
+
+namespace media {
+
+static const int kBenchmarkIterations = 200000;
+
+// InputCallback that zeroes out the provided AudioBus.
+class NullInputProvider : public AudioConverter::InputCallback {
+ public:
+ NullInputProvider() {}
+ virtual ~NullInputProvider() {}
+
+ virtual double ProvideInput(AudioBus* audio_bus,
+ base::TimeDelta buffer_delay) OVERRIDE {
+ audio_bus->Zero();
+ return 1;
+ }
+};
+
+void RunConvertBenchmark(const AudioParameters& in_params,
+ const AudioParameters& out_params,
+ bool fifo,
+ const std::string& trace_name) {
+ NullInputProvider fake_input1;
+ NullInputProvider fake_input2;
+ NullInputProvider fake_input3;
+ scoped_ptr<AudioBus> output_bus = AudioBus::Create(out_params);
+
+ AudioConverter converter(in_params, out_params, !fifo);
+ converter.AddInput(&fake_input1);
+ converter.AddInput(&fake_input2);
+ converter.AddInput(&fake_input3);
+
+ base::TimeTicks start = base::TimeTicks::HighResNow();
+ for (int i = 0; i < kBenchmarkIterations; ++i) {
+ converter.Convert(output_bus.get());
+ }
+ double runs_per_second = kBenchmarkIterations /
+ (base::TimeTicks::HighResNow() - start).InSecondsF();
+ perf_test::PrintResult(
+ "audio_converter", "", trace_name, runs_per_second, "runs/s", true);
+}
+
+TEST(AudioConverterPerfTest, ConvertBenchmark) {
+ // Create input and output parameters to convert between the two most common
+ // sets of parameters (as indicated via UMA data).
+ AudioParameters input_params(
+ AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO, 48000, 16, 2048);
+ AudioParameters output_params(
+ AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO, 44100, 16, 440);
+
+ RunConvertBenchmark(input_params, output_params, false, "convert");
+}
+
+TEST(AudioConverterPerfTest, ConvertBenchmarkFIFO) {
+ // Create input and output parameters to convert between common buffer sizes
+ // without any resampling for the FIFO vs no FIFO benchmarks.
+ AudioParameters input_params(AudioParameters::AUDIO_PCM_LINEAR,
+ CHANNEL_LAYOUT_STEREO,
+ 44100,
+ 16,
+ 2048);
+ AudioParameters output_params(
+ AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO, 44100, 16, 440);
+
+ RunConvertBenchmark(input_params, output_params, true, "convert_fifo_only");
+ RunConvertBenchmark(input_params, output_params, false,
+ "convert_pass_through");
+}
+
+} // namespace media
diff --git a/chromium/media/base/audio_converter_unittest.cc b/chromium/media/base/audio_converter_unittest.cc
index d218ac882f4..aeb021c3114 100644
--- a/chromium/media/base/audio_converter_unittest.cc
+++ b/chromium/media/base/audio_converter_unittest.cc
@@ -7,12 +7,9 @@
#include <cmath>
-#include "base/command_line.h"
-#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/memory/scoped_vector.h"
#include "base/strings/string_number_conversions.h"
-#include "base/time/time.h"
#include "media/base/audio_converter.h"
#include "media/base/fake_audio_render_callback.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -20,10 +17,6 @@
namespace media {
-// Command line switch for runtime adjustment of benchmark iterations.
-static const char kBenchmarkIterations[] = "audio-converter-iterations";
-static const int kDefaultIterations = 10;
-
// Parameters which control the many input case tests.
static const int kConvertInputs = 8;
static const int kConvertCycles = 3;
@@ -234,107 +227,6 @@ TEST(AudioConverterTest, AudioDelay) {
callback.last_audio_delay_milliseconds());
}
-// InputCallback that zero's out the provided AudioBus. Used for benchmarking.
-class NullInputProvider : public AudioConverter::InputCallback {
- public:
- NullInputProvider() {}
- virtual ~NullInputProvider() {}
-
- virtual double ProvideInput(AudioBus* audio_bus,
- base::TimeDelta buffer_delay) OVERRIDE {
- audio_bus->Zero();
- return 1;
- }
-};
-
-// Benchmark for audio conversion. Original benchmarks were run with
-// --audio-converter-iterations=50000.
-TEST(AudioConverterTest, ConvertBenchmark) {
- int benchmark_iterations = kDefaultIterations;
- std::string iterations(CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
- kBenchmarkIterations));
- base::StringToInt(iterations, &benchmark_iterations);
- if (benchmark_iterations < kDefaultIterations)
- benchmark_iterations = kDefaultIterations;
-
- NullInputProvider fake_input1;
- NullInputProvider fake_input2;
- NullInputProvider fake_input3;
-
- printf("Benchmarking %d iterations:\n", benchmark_iterations);
-
- {
- // Create input and output parameters to convert between the two most common
- // sets of parameters (as indicated via UMA data).
- AudioParameters input_params(
- AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
- 48000, 16, 2048);
- AudioParameters output_params(
- AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
- 44100, 16, 440);
- scoped_ptr<AudioBus> output_bus = AudioBus::Create(output_params);
-
- scoped_ptr<AudioConverter> converter(
- new AudioConverter(input_params, output_params, true));
- converter->AddInput(&fake_input1);
- converter->AddInput(&fake_input2);
- converter->AddInput(&fake_input3);
-
- // Benchmark Convert() w/ FIFO.
- base::TimeTicks start = base::TimeTicks::HighResNow();
- for (int i = 0; i < benchmark_iterations; ++i) {
- converter->Convert(output_bus.get());
- }
- double total_time_ms =
- (base::TimeTicks::HighResNow() - start).InMillisecondsF();
- printf("Convert() w/ Resampling took %.2fms.\n", total_time_ms);
- }
-
- // Create input and output parameters to convert between common buffer sizes
- // without any resampling for the FIFO vs no FIFO benchmarks.
- AudioParameters input_params(
- AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
- 44100, 16, 2048);
- AudioParameters output_params(
- AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
- 44100, 16, 440);
- scoped_ptr<AudioBus> output_bus = AudioBus::Create(output_params);
-
- {
- scoped_ptr<AudioConverter> converter(
- new AudioConverter(input_params, output_params, false));
- converter->AddInput(&fake_input1);
- converter->AddInput(&fake_input2);
- converter->AddInput(&fake_input3);
-
- // Benchmark Convert() w/ FIFO.
- base::TimeTicks start = base::TimeTicks::HighResNow();
- for (int i = 0; i < benchmark_iterations; ++i) {
- converter->Convert(output_bus.get());
- }
- double total_time_ms =
- (base::TimeTicks::HighResNow() - start).InMillisecondsF();
- printf("Convert() w/ FIFO took %.2fms.\n", total_time_ms);
- }
-
- {
- scoped_ptr<AudioConverter> converter(
- new AudioConverter(input_params, output_params, true));
- converter->AddInput(&fake_input1);
- converter->AddInput(&fake_input2);
- converter->AddInput(&fake_input3);
-
- // Benchmark Convert() w/o FIFO.
- base::TimeTicks start = base::TimeTicks::HighResNow();
- for (int i = 0; i < benchmark_iterations; ++i) {
- converter->Convert(output_bus.get());
- }
- double total_time_ms =
- (base::TimeTicks::HighResNow() - start).InMillisecondsF();
- printf("Convert() w/o FIFO took %.2fms.\n", total_time_ms);
- }
-}
-
TEST_P(AudioConverterTest, NoInputs) {
FillAudioData(1.0f);
EXPECT_TRUE(RenderAndValidateAudioData(0.0f));
diff --git a/chromium/media/base/audio_decoder_config.h b/chromium/media/base/audio_decoder_config.h
index a17d2215b97..53705ccda7b 100644
--- a/chromium/media/base/audio_decoder_config.h
+++ b/chromium/media/base/audio_decoder_config.h
@@ -33,6 +33,7 @@ enum AudioCodec {
kCodecPCM_S24BE,
kCodecOpus,
kCodecEAC3,
+ kCodecPCM_ALAW,
// DO NOT ADD RANDOM AUDIO CODECS!
//
// The only acceptable time to add a new codec is if there is production code
diff --git a/chromium/media/base/audio_renderer_mixer_unittest.cc b/chromium/media/base/audio_renderer_mixer_unittest.cc
index 8853068335c..589358357b5 100644
--- a/chromium/media/base/audio_renderer_mixer_unittest.cc
+++ b/chromium/media/base/audio_renderer_mixer_unittest.cc
@@ -22,18 +22,17 @@
namespace media {
// Parameters which control the many input case tests.
-static const int kMixerInputs = 8;
-static const int kMixerCycles = 3;
+const int kMixerInputs = 8;
+const int kMixerCycles = 3;
// Parameters used for testing.
-static const int kBitsPerChannel = 32;
-static const ChannelLayout kChannelLayout = CHANNEL_LAYOUT_STEREO;
-static const int kHighLatencyBufferSize = 8192;
-static const int kLowLatencyBufferSize = 256;
-static const int kSampleRate = 48000;
+const int kBitsPerChannel = 32;
+const ChannelLayout kChannelLayout = CHANNEL_LAYOUT_STEREO;
+const int kHighLatencyBufferSize = 8192;
+const int kLowLatencyBufferSize = 256;
// Number of full sine wave cycles for each Render() call.
-static const int kSineCycles = 4;
+const int kSineCycles = 4;
// Tuple of <input sampling rate, output sampling rate, epsilon>.
typedef std::tr1::tuple<int, int, double> AudioRendererMixerTestData;
diff --git a/chromium/media/base/bind_to_loop.h b/chromium/media/base/bind_to_loop.h
index f224adbb266..92d358c7be9 100644
--- a/chromium/media/base/bind_to_loop.h
+++ b/chromium/media/base/bind_to_loop.h
@@ -40,10 +40,6 @@ template <typename T>
base::internal::PassedWrapper<scoped_ptr<T> > TrampolineForward(
scoped_ptr<T>& p) { return base::Passed(&p); }
-template <typename T>
-base::internal::PassedWrapper<scoped_ptr<T[]> > TrampolineForward(
- scoped_ptr<T[]>& p) { return base::Passed(&p); }
-
template <typename T, typename R>
base::internal::PassedWrapper<scoped_ptr_malloc<T, R> > TrampolineForward(
scoped_ptr_malloc<T, R>& p) { return base::Passed(&p); }
diff --git a/chromium/media/base/bind_to_loop.h.pump b/chromium/media/base/bind_to_loop.h.pump
index 09ae518e2ee..8490413eb1c 100644
--- a/chromium/media/base/bind_to_loop.h.pump
+++ b/chromium/media/base/bind_to_loop.h.pump
@@ -45,10 +45,6 @@ template <typename T>
base::internal::PassedWrapper<scoped_ptr<T> > TrampolineForward(
scoped_ptr<T>& p) { return base::Passed(&p); }
-template <typename T>
-base::internal::PassedWrapper<scoped_array<T> > TrampolineForward(
- scoped_array<T>& p) { return base::Passed(&p); }
-
template <typename T, typename R>
base::internal::PassedWrapper<scoped_ptr_malloc<T, R> > TrampolineForward(
scoped_ptr_malloc<T, R>& p) { return base::Passed(&p); }
diff --git a/chromium/media/base/bit_reader.cc b/chromium/media/base/bit_reader.cc
index ea74350390a..e4d83af7410 100644
--- a/chromium/media/base/bit_reader.cc
+++ b/chromium/media/base/bit_reader.cc
@@ -19,7 +19,7 @@ BitReader::~BitReader() {}
bool BitReader::SkipBits(int num_bits) {
DCHECK_GE(num_bits, 0);
- DLOG_IF(INFO, num_bits > 100)
+ DVLOG_IF(0, num_bits > 100)
<< "BitReader::SkipBits inefficient for large skips";
// Skip any bits in the current byte waiting to be processed, then
diff --git a/chromium/media/base/channel_mixer_unittest.cc b/chromium/media/base/channel_mixer_unittest.cc
index eddbc1b90ba..e048f8d9fc5 100644
--- a/chromium/media/base/channel_mixer_unittest.cc
+++ b/chromium/media/base/channel_mixer_unittest.cc
@@ -100,20 +100,26 @@ TEST_P(ChannelMixerTest, Mixing) {
scoped_ptr<AudioBus> input_bus = AudioBus::Create(input_channels, kFrames);
AudioParameters input_audio(AudioParameters::AUDIO_PCM_LINEAR,
input_layout,
+ input_layout == CHANNEL_LAYOUT_DISCRETE ?
+ input_channels :
+ ChannelLayoutToChannelCount(input_layout),
+ 0,
AudioParameters::kAudioCDSampleRate, 16,
- kFrames);
- if (input_layout == CHANNEL_LAYOUT_DISCRETE)
- input_audio.SetDiscreteChannels(input_channels);
+ kFrames,
+ AudioParameters::NO_EFFECTS);
ChannelLayout output_layout = GetParam().output_layout;
int output_channels = GetParam().output_channels;
scoped_ptr<AudioBus> output_bus = AudioBus::Create(output_channels, kFrames);
AudioParameters output_audio(AudioParameters::AUDIO_PCM_LINEAR,
output_layout,
+ output_layout == CHANNEL_LAYOUT_DISCRETE ?
+ output_channels :
+ ChannelLayoutToChannelCount(output_layout),
+ 0,
AudioParameters::kAudioCDSampleRate, 16,
- kFrames);
- if (output_layout == CHANNEL_LAYOUT_DISCRETE)
- output_audio.SetDiscreteChannels(output_channels);
+ kFrames,
+ AudioParameters::NO_EFFECTS);
const float* channel_values = GetParam().channel_values;
ASSERT_EQ(input_bus->channels(), GetParam().num_channel_values);
diff --git a/chromium/media/base/container_names_unittest.cc b/chromium/media/base/container_names_unittest.cc
index 21f80af6d98..c7d40d45afc 100644
--- a/chromium/media/base/container_names_unittest.cc
+++ b/chromium/media/base/container_names_unittest.cc
@@ -109,9 +109,9 @@ void TestFile(MediaContainerName expected, const base::FilePath& filename) {
// so use file length if file less than 8192 bytes (http://crbug.com/243885).
int read_size = sizeof(buffer);
int64 actual_size;
- if (file_util::GetFileSize(filename, &actual_size) && actual_size < read_size)
+ if (base::GetFileSize(filename, &actual_size) && actual_size < read_size)
read_size = actual_size;
- int read = file_util::ReadFile(filename, buffer, read_size);
+ int read = base::ReadFile(filename, buffer, read_size);
// Now verify the type.
EXPECT_EQ(expected,
diff --git a/chromium/media/base/data_source.cc b/chromium/media/base/data_source.cc
index c25f9e73d62..91f52608609 100644
--- a/chromium/media/base/data_source.cc
+++ b/chromium/media/base/data_source.cc
@@ -23,8 +23,6 @@ void DataSource::set_host(DataSourceHost* host) {
host_ = host;
}
-void DataSource::SetPlaybackRate(float playback_rate) {}
-
DataSourceHost* DataSource::host() { return host_; }
} // namespace media
diff --git a/chromium/media/base/data_source.h b/chromium/media/base/data_source.h
index def1d01f314..9176c8e845c 100644
--- a/chromium/media/base/data_source.h
+++ b/chromium/media/base/data_source.h
@@ -46,9 +46,6 @@ class MEDIA_EXPORT DataSource {
virtual void Read(int64 position, int size, uint8* data,
const DataSource::ReadCB& read_cb) = 0;
- // Notifies the DataSource of a change in the current playback rate.
- virtual void SetPlaybackRate(float playback_rate);
-
// Stops the DataSource. Once this is called all future Read() calls will
// return an error.
virtual void Stop(const base::Closure& callback) = 0;
diff --git a/chromium/media/base/demuxer.cc b/chromium/media/base/demuxer.cc
index 6cd4e29a481..e7c38c3f221 100644
--- a/chromium/media/base/demuxer.cc
+++ b/chromium/media/base/demuxer.cc
@@ -14,18 +14,4 @@ Demuxer::Demuxer() {}
Demuxer::~Demuxer() {}
-void Demuxer::SetPlaybackRate(float playback_rate) {}
-
-void Demuxer::Seek(base::TimeDelta time, const PipelineStatusCB& status_cb) {
- DCHECK(!status_cb.is_null());
- status_cb.Run(PIPELINE_OK);
-}
-
-void Demuxer::Stop(const base::Closure& callback) {
- DCHECK(!callback.is_null());
- callback.Run();
-}
-
-void Demuxer::OnAudioRendererDisabled() {}
-
} // namespace media
diff --git a/chromium/media/base/demuxer.h b/chromium/media/base/demuxer.h
index 853a21a2a75..9b671f007cb 100644
--- a/chromium/media/base/demuxer.h
+++ b/chromium/media/base/demuxer.h
@@ -15,6 +15,8 @@
namespace media {
+class TextTrackConfig;
+
class MEDIA_EXPORT DemuxerHost : public DataSourceHost {
public:
// Sets the duration of the media in microseconds.
@@ -25,6 +27,13 @@ class MEDIA_EXPORT DemuxerHost : public DataSourceHost {
// method with PIPELINE_OK.
virtual void OnDemuxerError(PipelineStatus error) = 0;
+ // Add |text_stream| to the collection managed by the text renderer.
+ virtual void AddTextStream(DemuxerStream* text_stream,
+ const TextTrackConfig& config) = 0;
+
+ // Remove |text_stream| from the presentation.
+ virtual void RemoveTextStream(DemuxerStream* text_stream) = 0;
+
protected:
virtual ~DemuxerHost();
};
@@ -45,21 +54,19 @@ class MEDIA_EXPORT Demuxer {
// The demuxer does not own |host| as it is guaranteed to outlive the
// lifetime of the demuxer. Don't delete it!
virtual void Initialize(DemuxerHost* host,
- const PipelineStatusCB& status_cb) = 0;
-
- // The pipeline playback rate has been changed. Demuxers may implement this
- // method if they need to respond to this call.
- virtual void SetPlaybackRate(float playback_rate);
+ const PipelineStatusCB& status_cb,
+ bool enable_text_tracks) = 0;
// Carry out any actions required to seek to the given time, executing the
// callback upon completion.
- virtual void Seek(base::TimeDelta time, const PipelineStatusCB& status_cb);
+ virtual void Seek(base::TimeDelta time,
+ const PipelineStatusCB& status_cb) = 0;
// Starts stopping this demuxer, executing the callback upon completion.
//
// After the callback completes the demuxer may be destroyed. It is illegal to
// call any method (including Stop()) after a demuxer has stopped.
- virtual void Stop(const base::Closure& callback);
+ virtual void Stop(const base::Closure& callback) = 0;
// This method is called from the pipeline when the audio renderer
// is disabled. Demuxers can ignore the notification if they do not
@@ -67,9 +74,10 @@ class MEDIA_EXPORT Demuxer {
//
// TODO(acolwell): Change to generic DisableStream(DemuxerStream::Type).
// TODO(scherkus): this might not be needed http://crbug.com/234708
- virtual void OnAudioRendererDisabled();
+ virtual void OnAudioRendererDisabled() = 0;
- // Returns the given stream type, or NULL if that type is not present.
+ // Returns the first stream of the given stream type (which is not allowed
+ // to be DemuxerStream::TEXT), or NULL if that type of stream is not present.
virtual DemuxerStream* GetStream(DemuxerStream::Type type) = 0;
// Returns the starting time for the media file.
diff --git a/chromium/media/tools/demuxer_bench/demuxer_bench.cc b/chromium/media/base/demuxer_perftest.cc
index ab8b313c435..f63e6e4b3e5 100644
--- a/chromium/media/tools/demuxer_bench/demuxer_bench.cc
+++ b/chromium/media/base/demuxer_perftest.cc
@@ -1,27 +1,23 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// demuxer_bench is a standalone benchmarking tool for FFmpegDemuxer. It
-// simulates the reading requirements for playback by reading from the stream
-// that has the earliest timestamp.
-
-#include <iostream>
#include "base/at_exit.h"
#include "base/bind.h"
-#include "base/command_line.h"
-#include "base/logging.h"
#include "base/message_loop/message_loop.h"
#include "base/strings/string_number_conversions.h"
+#include "base/time/time.h"
#include "media/base/media.h"
#include "media/base/media_log.h"
+#include "media/base/test_data_util.h"
#include "media/filters/ffmpeg_demuxer.h"
#include "media/filters/file_data_source.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/perf/perf_test.h"
+
+namespace media {
-namespace switches {
-const char kEnableBitstreamConverter[] = "enable-bitstream-converter";
-} // namespace switches
+static const int kBenchmarkIterations = 500;
class DemuxerHostImpl : public media::DemuxerHost {
public:
@@ -37,12 +33,15 @@ class DemuxerHostImpl : public media::DemuxerHost {
// DemuxerHost implementation.
virtual void SetDuration(base::TimeDelta duration) OVERRIDE {}
virtual void OnDemuxerError(media::PipelineStatus error) OVERRIDE {}
+ virtual void AddTextStream(media::DemuxerStream* text_stream,
+ const media::TextTrackConfig& config) OVERRIDE {}
+ virtual void RemoveTextStream(media::DemuxerStream* text_stream) OVERRIDE {}
private:
DISALLOW_COPY_AND_ASSIGN(DemuxerHostImpl);
};
-void QuitLoopWithStatus(base::MessageLoop* message_loop,
+static void QuitLoopWithStatus(base::MessageLoop* message_loop,
media::PipelineStatus status) {
CHECK_EQ(status, media::PIPELINE_OK);
message_loop->PostTask(FROM_HERE, base::MessageLoop::QuitWhenIdleClosure());
@@ -50,7 +49,7 @@ void QuitLoopWithStatus(base::MessageLoop* message_loop,
static void NeedKey(const std::string& type,
const std::vector<uint8>& init_data) {
- LOG(INFO) << "File is encrypted.";
+ VLOG(0) << "File is encrypted.";
}
typedef std::vector<media::DemuxerStream* > Streams;
@@ -68,7 +67,7 @@ class StreamReader {
// Returns true when all streams have reached end of stream.
bool IsDone();
- int number_of_streams() { return streams_.size(); }
+ int number_of_streams() { return static_cast<int>(streams_.size()); }
const Streams& streams() { return streams_; }
const std::vector<int>& counts() { return counts_; }
@@ -170,71 +169,62 @@ int StreamReader::GetNextStreamIndexToRead() {
return index;
}
-int main(int argc, char** argv) {
- base::AtExitManager at_exit;
- media::InitializeMediaLibraryForTesting();
-
- CommandLine::Init(argc, argv);
- CommandLine* cmd_line = CommandLine::ForCurrentProcess();
-
- if (cmd_line->GetArgs().empty()) {
- std::cerr << "Usage: " << argv[0] << " [file]\n\n"
- << "Options:\n"
- << " --" << switches::kEnableBitstreamConverter
- << " Enables H.264 Annex B bitstream conversion"
- << std::endl;
- return 1;
- }
-
- base::MessageLoop message_loop;
- DemuxerHostImpl demuxer_host;
- base::FilePath file_path(cmd_line->GetArgs()[0]);
-
- // Setup.
- media::FileDataSource data_source;
- CHECK(data_source.Initialize(file_path));
-
- media::Demuxer::NeedKeyCB need_key_cb = base::Bind(&NeedKey);
- media::FFmpegDemuxer demuxer(message_loop.message_loop_proxy(),
- &data_source,
- need_key_cb,
- new media::MediaLog());
-
- demuxer.Initialize(&demuxer_host, base::Bind(
- &QuitLoopWithStatus, &message_loop));
- message_loop.Run();
-
- StreamReader stream_reader(
- &demuxer, cmd_line->HasSwitch(switches::kEnableBitstreamConverter));
-
- // Benchmark.
- base::TimeTicks start = base::TimeTicks::HighResNow();
- while (!stream_reader.IsDone()) {
- stream_reader.Read();
- }
- base::TimeTicks end = base::TimeTicks::HighResNow();
-
- // Results.
- std::cout << "Time: " << (end - start).InMillisecondsF() << " ms\n";
- for (int i = 0; i < stream_reader.number_of_streams(); ++i) {
- media::DemuxerStream* stream = stream_reader.streams()[i];
- std::cout << "Stream #" << i << ": ";
-
- if (stream->type() == media::DemuxerStream::AUDIO) {
- std::cout << "audio";
- } else if (stream->type() == media::DemuxerStream::VIDEO) {
- std::cout << "video";
- } else {
- std::cout << "unknown";
+static void RunDemuxerBenchmark(const std::string& filename) {
+ base::FilePath file_path(GetTestDataFilePath(filename));
+ double total_time = 0.0;
+ for (int i = 0; i < kBenchmarkIterations; ++i) {
+ // Setup.
+ base::MessageLoop message_loop;
+ DemuxerHostImpl demuxer_host;
+ FileDataSource data_source;
+ ASSERT_TRUE(data_source.Initialize(file_path));
+
+ Demuxer::NeedKeyCB need_key_cb = base::Bind(&NeedKey);
+ FFmpegDemuxer demuxer(message_loop.message_loop_proxy(),
+ &data_source,
+ need_key_cb,
+ new MediaLog());
+
+ demuxer.Initialize(&demuxer_host,
+ base::Bind(&QuitLoopWithStatus, &message_loop),
+ false);
+ message_loop.Run();
+ StreamReader stream_reader(&demuxer, false);
+
+ // Benchmark.
+ base::TimeTicks start = base::TimeTicks::HighResNow();
+ while (!stream_reader.IsDone()) {
+ stream_reader.Read();
}
-
- std::cout << ", " << stream_reader.counts()[i] << " packets" << std::endl;
+ base::TimeTicks end = base::TimeTicks::HighResNow();
+ total_time += (end - start).InSecondsF();
+ demuxer.Stop(base::Bind(
+ &QuitLoopWithStatus, &message_loop, PIPELINE_OK));
+ message_loop.Run();
}
- // Teardown.
- demuxer.Stop(base::Bind(
- &QuitLoopWithStatus, &message_loop, media::PIPELINE_OK));
- message_loop.Run();
+ perf_test::PrintResult("demuxer_bench",
+ "",
+ filename,
+ kBenchmarkIterations / total_time,
+ "runs/s",
+ true);
+}
- return 0;
+TEST(DemuxerPerfTest, Demuxer) {
+ RunDemuxerBenchmark("bear.ogv");
+ RunDemuxerBenchmark("bear-640x360.webm");
+ RunDemuxerBenchmark("sfx_s16le.wav");
+#if defined(USE_PROPRIETARY_CODECS)
+ RunDemuxerBenchmark("bear-1280x720.mp4");
+ RunDemuxerBenchmark("sfx.mp3");
+#endif
+#if defined(OS_CHROMEOS)
+ RunDemuxerBenchmark("bear.flac");
+#endif
+#if defined(USE_PROPRIETARY_CODECS) && defined(OS_CHROMEOS)
+ RunDemuxerBenchmark("bear.avi");
+#endif
}
+
+} // namespace media
diff --git a/chromium/media/base/demuxer_stream.h b/chromium/media/base/demuxer_stream.h
index bb4534475ed..4e07c66d8fe 100644
--- a/chromium/media/base/demuxer_stream.h
+++ b/chromium/media/base/demuxer_stream.h
@@ -21,6 +21,7 @@ class MEDIA_EXPORT DemuxerStream {
UNKNOWN,
AUDIO,
VIDEO,
+ TEXT,
NUM_TYPES, // Always keep this entry as the last one!
};
diff --git a/chromium/media/base/fake_audio_renderer_sink.cc b/chromium/media/base/fake_audio_renderer_sink.cc
new file mode 100644
index 00000000000..d42db6de1b9
--- /dev/null
+++ b/chromium/media/base/fake_audio_renderer_sink.cc
@@ -0,0 +1,86 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/fake_audio_renderer_sink.h"
+
+#include "base/logging.h"
+
+namespace media {
+
+FakeAudioRendererSink::FakeAudioRendererSink()
+ : state_(kUninitialized),
+ callback_(NULL) {
+}
+
+FakeAudioRendererSink::~FakeAudioRendererSink() {
+ DCHECK(!callback_);
+}
+
+void FakeAudioRendererSink::Initialize(const AudioParameters& params,
+ RenderCallback* callback) {
+ DCHECK_EQ(state_, kUninitialized);
+ DCHECK(!callback_);
+ DCHECK(callback);
+
+ callback_ = callback;
+ ChangeState(kInitialized);
+}
+
+void FakeAudioRendererSink::Start() {
+ DCHECK_EQ(state_, kInitialized);
+ ChangeState(kStarted);
+}
+
+void FakeAudioRendererSink::Stop() {
+ callback_ = NULL;
+ ChangeState(kStopped);
+}
+
+void FakeAudioRendererSink::Pause() {
+ DCHECK(state_ == kStarted || state_ == kPlaying) << "state_ " << state_;
+ ChangeState(kPaused);
+}
+
+void FakeAudioRendererSink::Play() {
+ DCHECK(state_ == kStarted || state_ == kPaused) << "state_ " << state_;
+ DCHECK_EQ(state_, kPaused);
+ ChangeState(kPlaying);
+}
+
+bool FakeAudioRendererSink::SetVolume(double volume) {
+ return true;
+}
+
+bool FakeAudioRendererSink::Render(AudioBus* dest, int audio_delay_milliseconds,
+ int* frames_written) {
+ if (state_ != kPlaying)
+ return false;
+
+ *frames_written = callback_->Render(dest, audio_delay_milliseconds);
+ return true;
+}
+
+void FakeAudioRendererSink::OnRenderError() {
+ DCHECK_NE(state_, kUninitialized);
+ DCHECK_NE(state_, kStopped);
+
+ callback_->OnRenderError();
+}
+
+void FakeAudioRendererSink::ChangeState(State new_state) {
+ static const char* kStateNames[] = {
+ "kUninitialized",
+ "kInitialized",
+ "kStarted",
+ "kPaused",
+ "kPlaying",
+ "kStopped"
+ };
+
+ DVLOG(1) << __FUNCTION__ << " : "
+ << kStateNames[state_] << " -> " << kStateNames[new_state];
+ state_ = new_state;
+}
+
+} // namespace media
diff --git a/chromium/media/base/fake_audio_renderer_sink.h b/chromium/media/base/fake_audio_renderer_sink.h
new file mode 100644
index 00000000000..b548224e6cc
--- /dev/null
+++ b/chromium/media/base/fake_audio_renderer_sink.h
@@ -0,0 +1,61 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_FAKE_AUDIO_RENDERER_SINK_H_
+#define MEDIA_BASE_FAKE_AUDIO_RENDERER_SINK_H_
+
+#include "media/audio/audio_parameters.h"
+#include "media/base/audio_renderer_sink.h"
+
+namespace media {
+
+class FakeAudioRendererSink : public AudioRendererSink {
+ public:
+ enum State {
+ kUninitialized,
+ kInitialized,
+ kStarted,
+ kPaused,
+ kPlaying,
+ kStopped
+ };
+
+ FakeAudioRendererSink();
+
+ virtual void Initialize(const AudioParameters& params,
+ RenderCallback* callback) OVERRIDE;
+ virtual void Start() OVERRIDE;
+ virtual void Stop() OVERRIDE;
+ virtual void Pause() OVERRIDE;
+ virtual void Play() OVERRIDE;
+ virtual bool SetVolume(double volume) OVERRIDE;
+
+ // Attempts to call Render() on the callback provided to
+ // Initialize() with |dest| and |audio_delay_milliseconds|.
+ // Returns true and sets |frames_written| to the return value of the
+ // Render() call.
+ // Returns false if this object is in a state where calling Render()
+ // should not occur. (i.e., in the kPaused or kStopped state.) The
+ // value of |frames_written| is undefined if false is returned.
+ bool Render(AudioBus* dest, int audio_delay_milliseconds,
+ int* frames_written);
+ void OnRenderError();
+
+ State state() const { return state_; }
+
+ protected:
+ virtual ~FakeAudioRendererSink();
+
+ private:
+ void ChangeState(State new_state);
+
+ State state_;
+ RenderCallback* callback_;
+
+ DISALLOW_COPY_AND_ASSIGN(FakeAudioRendererSink);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_FAKE_AUDIO_RENDERER_SINK_H_
diff --git a/chromium/media/base/fake_text_track_stream.cc b/chromium/media/base/fake_text_track_stream.cc
new file mode 100644
index 00000000000..3136c475a78
--- /dev/null
+++ b/chromium/media/base/fake_text_track_stream.cc
@@ -0,0 +1,83 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/fake_text_track_stream.h"
+
+#include "base/bind.h"
+#include "base/callback_helpers.h"
+#include "media/base/decoder_buffer.h"
+#include "media/filters/webvtt_util.h"
+
+namespace media {
+
+FakeTextTrackStream::FakeTextTrackStream()
+ : message_loop_(base::MessageLoopProxy::current()),
+ stopping_(false) {
+}
+
+FakeTextTrackStream::~FakeTextTrackStream() {
+ DCHECK(read_cb_.is_null());
+}
+
+void FakeTextTrackStream::Read(const ReadCB& read_cb) {
+ DCHECK(!read_cb.is_null());
+ DCHECK(read_cb_.is_null());
+ OnRead();
+ read_cb_ = read_cb;
+
+ if (stopping_) {
+ message_loop_->PostTask(FROM_HERE, base::Bind(
+ &FakeTextTrackStream::AbortPendingRead, base::Unretained(this)));
+ }
+}
+
+DemuxerStream::Type FakeTextTrackStream::type() {
+ return DemuxerStream::TEXT;
+}
+
+void FakeTextTrackStream::SatisfyPendingRead(
+ const base::TimeDelta& start,
+ const base::TimeDelta& duration,
+ const std::string& id,
+ const std::string& content,
+ const std::string& settings) {
+ DCHECK(!read_cb_.is_null());
+
+ const uint8* const data_buf = reinterpret_cast<const uint8*>(content.data());
+ const int data_len = static_cast<int>(content.size());
+
+ std::vector<uint8> side_data;
+ MakeSideData(id.begin(), id.end(),
+ settings.begin(), settings.end(),
+ &side_data);
+
+ const uint8* const sd_buf = &side_data[0];
+ const int sd_len = static_cast<int>(side_data.size());
+
+ scoped_refptr<DecoderBuffer> buffer;
+ buffer = DecoderBuffer::CopyFrom(data_buf, data_len, sd_buf, sd_len);
+
+ buffer->set_timestamp(start);
+ buffer->set_duration(duration);
+
+ base::ResetAndReturn(&read_cb_).Run(kOk, buffer);
+}
+
+void FakeTextTrackStream::AbortPendingRead() {
+ DCHECK(!read_cb_.is_null());
+ base::ResetAndReturn(&read_cb_).Run(kAborted, NULL);
+}
+
+void FakeTextTrackStream::SendEosNotification() {
+ DCHECK(!read_cb_.is_null());
+ base::ResetAndReturn(&read_cb_).Run(kOk, DecoderBuffer::CreateEOSBuffer());
+}
+
+void FakeTextTrackStream::Stop() {
+ stopping_ = true;
+ if (!read_cb_.is_null())
+ AbortPendingRead();
+}
+
+} // namespace media
diff --git a/chromium/media/base/fake_text_track_stream.h b/chromium/media/base/fake_text_track_stream.h
new file mode 100644
index 00000000000..33c74ef4f30
--- /dev/null
+++ b/chromium/media/base/fake_text_track_stream.h
@@ -0,0 +1,47 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/message_loop/message_loop.h"
+#include "media/base/audio_decoder_config.h"
+#include "media/base/demuxer_stream.h"
+#include "media/base/video_decoder_config.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace media {
+
+// Fake implementation of the DemuxerStream. These are the stream objects
+// we pass to the text renderer object when streams are added and removed.
+class FakeTextTrackStream : public DemuxerStream {
+ public:
+ FakeTextTrackStream();
+ virtual ~FakeTextTrackStream();
+
+ // DemuxerStream implementation.
+ virtual void Read(const ReadCB&) OVERRIDE;
+ MOCK_METHOD0(audio_decoder_config, AudioDecoderConfig());
+ MOCK_METHOD0(video_decoder_config, VideoDecoderConfig());
+ virtual Type type() OVERRIDE;
+ MOCK_METHOD0(EnableBitstreamConverter, void());
+
+ void SatisfyPendingRead(const base::TimeDelta& start,
+ const base::TimeDelta& duration,
+ const std::string& id,
+ const std::string& content,
+ const std::string& settings);
+ void AbortPendingRead();
+ void SendEosNotification();
+
+ void Stop();
+
+ MOCK_METHOD0(OnRead, void());
+
+ private:
+ scoped_refptr<base::MessageLoopProxy> message_loop_;
+ ReadCB read_cb_;
+ bool stopping_;
+
+ DISALLOW_COPY_AND_ASSIGN(FakeTextTrackStream);
+};
+
+} // namespace media
diff --git a/chromium/media/base/filter_collection.cc b/chromium/media/base/filter_collection.cc
index 730835f1919..da5042f327f 100644
--- a/chromium/media/base/filter_collection.cc
+++ b/chromium/media/base/filter_collection.cc
@@ -6,6 +6,7 @@
#include "media/base/audio_renderer.h"
#include "media/base/demuxer.h"
+#include "media/base/text_renderer.h"
#include "media/base/video_renderer.h"
namespace media {
@@ -40,4 +41,13 @@ scoped_ptr<VideoRenderer> FilterCollection::GetVideoRenderer() {
return video_renderer_.Pass();
}
+void FilterCollection::SetTextRenderer(
+ scoped_ptr<TextRenderer> text_renderer) {
+ text_renderer_ = text_renderer.Pass();
+}
+
+scoped_ptr<TextRenderer> FilterCollection::GetTextRenderer() {
+ return text_renderer_.Pass();
+}
+
} // namespace media
diff --git a/chromium/media/base/filter_collection.h b/chromium/media/base/filter_collection.h
index 90ea0669446..a0aee76f0b8 100644
--- a/chromium/media/base/filter_collection.h
+++ b/chromium/media/base/filter_collection.h
@@ -12,6 +12,7 @@ namespace media {
class AudioRenderer;
class Demuxer;
+class TextRenderer;
class VideoRenderer;
// Represents a set of uninitialized demuxer and audio/video decoders and
@@ -33,10 +34,14 @@ class MEDIA_EXPORT FilterCollection {
void SetVideoRenderer(scoped_ptr<VideoRenderer> video_renderer);
scoped_ptr<VideoRenderer> GetVideoRenderer();
+ void SetTextRenderer(scoped_ptr<TextRenderer> text_renderer);
+ scoped_ptr<TextRenderer> GetTextRenderer();
+
private:
Demuxer* demuxer_;
scoped_ptr<AudioRenderer> audio_renderer_;
scoped_ptr<VideoRenderer> video_renderer_;
+ scoped_ptr<TextRenderer> text_renderer_;
DISALLOW_COPY_AND_ASSIGN(FilterCollection);
};
diff --git a/chromium/media/base/media_file_checker.cc b/chromium/media/base/media_file_checker.cc
index d4708e506c7..494657d209f 100644
--- a/chromium/media/base/media_file_checker.cc
+++ b/chromium/media/base/media_file_checker.cc
@@ -59,8 +59,8 @@ bool MediaFileChecker::Start(base::TimeDelta check_time) {
return false;
AVPacket packet;
- scoped_ptr_malloc<AVFrame, media::ScopedPtrAVFree> frame(
- avcodec_alloc_frame());
+ scoped_ptr_malloc<AVFrame, media::ScopedPtrAVFreeFrame> frame(
+ av_frame_alloc());
int result = 0;
base::Time deadline = base::Time::Now() +
diff --git a/chromium/media/base/media_keys.h b/chromium/media/base/media_keys.h
index 9369c50b563..0d86948564e 100644
--- a/chromium/media/base/media_keys.h
+++ b/chromium/media/base/media_keys.h
@@ -24,7 +24,7 @@ class Decryptor;
class MEDIA_EXPORT MediaKeys {
public:
// Reported to UMA, so never reuse a value!
- // Must be kept in sync with WebKit::WebMediaPlayerClient::MediaKeyErrorCode
+ // Must be kept in sync with blink::WebMediaPlayerClient::MediaKeyErrorCode
// (enforced in webmediaplayer_impl.cc).
enum KeyError {
kUnknownError = 1,
@@ -37,27 +37,28 @@ class MEDIA_EXPORT MediaKeys {
kMaxKeyError // Must be last and greater than any legit value.
};
+ const static uint32 kInvalidSessionId = 0;
+
MediaKeys();
virtual ~MediaKeys();
// Generates a key request with the |type| and |init_data| provided.
// Returns true if generating key request succeeded, false otherwise.
- // Note: AddKey() and CancelKeyRequest() should only be called after
- // GenerateKeyRequest() returns true.
- virtual bool GenerateKeyRequest(const std::string& type,
- const uint8* init_data,
- int init_data_length) = 0;
-
- // Adds a |key| to the session. The |key| is not limited to a decryption
- // key. It can be any data that the key system accepts, such as a license.
- // If multiple calls of this function set different keys for the same
- // key ID, the older key will be replaced by the newer key.
- virtual void AddKey(const uint8* key, int key_length,
- const uint8* init_data, int init_data_length,
- const std::string& session_id) = 0;
-
- // Cancels the key request specified by |session_id|.
- virtual void CancelKeyRequest(const std::string& session_id) = 0;
+ // Note: UpdateSession() and ReleaseSession() should only be called after
+ // CreateSession() returns true.
+ // TODO(jrummell): Remove return value when prefixed API is removed.
+ virtual bool CreateSession(uint32 session_id,
+ const std::string& type,
+ const uint8* init_data,
+ int init_data_length) = 0;
+
+ // Updates a session specified by |session_id| with |response|.
+ virtual void UpdateSession(uint32 session_id,
+ const uint8* response,
+ int response_length) = 0;
+
+ // Releases the session specified by |session_id|.
+ virtual void ReleaseSession(uint32 session_id) = 0;
// Gets the Decryptor object associated with the MediaKeys. Returns NULL if
// no Decryptor object is associated. The returned object is only guaranteed
@@ -69,20 +70,23 @@ class MEDIA_EXPORT MediaKeys {
};
// Key event callbacks. See the spec for details:
-// http://dvcs.w3.org/hg/html-media/raw-file/eme-v0.1b/encrypted-media/encrypted-media.html#event-summary
-typedef base::Callback<void(const std::string& session_id)> KeyAddedCB;
-
-typedef base::Callback<void(const std::string& session_id,
- media::MediaKeys::KeyError error_code,
- int system_code)> KeyErrorCB;
+// https://dvcs.w3.org/hg/html-media/raw-file/default/encrypted-media/encrypted-media.html#event-summary
+typedef base::Callback<
+ void(uint32 session_id, const std::string& web_session_id)>
+ SessionCreatedCB;
-typedef base::Callback<void(const std::string& session_id,
+typedef base::Callback<void(uint32 session_id,
const std::vector<uint8>& message,
- const std::string& default_url)> KeyMessageCB;
+ const std::string& destination_url)>
+ SessionMessageCB;
-typedef base::Callback<void(const std::string& session_id,
- const std::string& type,
- const std::vector<uint8>& init_data)> NeedKeyCB;
+typedef base::Callback<void(uint32 session_id)> SessionReadyCB;
+
+typedef base::Callback<void(uint32 session_id)> SessionClosedCB;
+
+typedef base::Callback<void(uint32 session_id,
+ media::MediaKeys::KeyError error_code,
+ int system_code)> SessionErrorCB;
} // namespace media
diff --git a/chromium/media/base/media_log.cc b/chromium/media/base/media_log.cc
index 8a07b020c7d..e791b441f4f 100644
--- a/chromium/media/base/media_log.cc
+++ b/chromium/media/base/media_log.cc
@@ -50,6 +50,8 @@ const char* MediaLog::EventTypeToString(MediaLogEvent::Type type) {
return "AUDIO_ENDED";
case MediaLogEvent::VIDEO_ENDED:
return "VIDEO_ENDED";
+ case MediaLogEvent::TEXT_ENDED:
+ return "TEXT_ENDED";
case MediaLogEvent::AUDIO_RENDERER_DISABLED:
return "AUDIO_RENDERER_DISABLED";
case MediaLogEvent::BUFFERED_EXTENTS_CHANGED:
diff --git a/chromium/media/base/media_log_event.h b/chromium/media/base/media_log_event.h
index 811d1131a7a..3052d415c12 100644
--- a/chromium/media/base/media_log_event.h
+++ b/chromium/media/base/media_log_event.h
@@ -70,9 +70,10 @@ struct MediaLogEvent {
TOTAL_BYTES_SET,
NETWORK_ACTIVITY_SET,
- // Audio/Video stream playback has ended.
+ // Audio/Video/Text stream playback has ended.
AUDIO_ENDED,
VIDEO_ENDED,
+ TEXT_ENDED,
// The audio renderer has been disabled.
// params: none.
diff --git a/chromium/media/base/media_switches.cc b/chromium/media/base/media_switches.cc
index c295a0d991d..3a8fb33bde9 100644
--- a/chromium/media/base/media_switches.cc
+++ b/chromium/media/base/media_switches.cc
@@ -12,8 +12,8 @@ const char kAudioBufferSize[] = "audio-buffer-size";
// Enable EAC3 playback in MSE.
const char kEnableEac3Playback[] = "enable-eac3-playback";
-// Enables Opus playback in media elements.
-const char kEnableOpusPlayback[] = "enable-opus-playback";
+// Disables Opus playback in media elements.
+const char kDisableOpusPlayback[] = "disable-opus-playback";
// Disables VP8 Alpha playback in media elements.
const char kDisableVp8AlphaPlayback[] = "disable-vp8-alpha-playback";
@@ -21,10 +21,6 @@ const char kDisableVp8AlphaPlayback[] = "disable-vp8-alpha-playback";
// Set number of threads to use for video decoding.
const char kVideoThreads[] = "video-threads";
-// Override suppressed responses to canPlayType().
-const char kOverrideEncryptedMediaCanPlayType[] =
- "override-encrypted-media-canplaytype";
-
// Enables MP3 stream parser for Media Source Extensions.
const char kEnableMP3StreamParser[] = "enable-mp3-stream-parser";
@@ -33,9 +29,6 @@ const char kEnableMP3StreamParser[] = "enable-mp3-stream-parser";
const char kDisableInfobarForProtectedMediaIdentifier[] =
"disable-infobar-for-protected-media-identifier";
-// Enables use of MediaDrm for Encrypted Media Extensions implementation.
-const char kEnableMediaDrm[] = "enable-mediadrm";
-
// Enables use of non-compositing MediaDrm decoding by default for Encrypted
// Media Extensions implementation.
const char kMediaDrmEnableNonCompositing[] = "mediadrm-enable-non-compositing";
@@ -63,6 +56,13 @@ const char kAlsaOutputDevice[] = "alsa-output-device";
// tested. See http://crbug.com/158170.
// TODO(dalecurtis): Remove this once we're sure nothing has exploded.
const char kDisableMainThreadAudio[] = "disable-main-thread-audio";
+// AVFoundation is available in versions 10.7 and onwards, and is to be used
+// http://crbug.com/288562 for both audio and video device monitoring and for
+// video capture. Being a dynamically loaded NSBundle and library, it hits the
+// Chrome startup time (http://crbug.com/311325 and http://crbug.com/311437);
+// until development is finished and the library load time issue is solved, the
+// usage of this library is hidden behind this flag.
+const char kEnableAVFoundation[] = "enable-avfoundation";
#endif
#if defined(OS_WIN)
@@ -98,4 +98,10 @@ const char kWaveOutBuffers[] = "waveout-buffers";
const char kUseCras[] = "use-cras";
#endif
+// Disables system sounds manager.
+const char kDisableSystemSoundsManager[] = "disable-system-sounds-manager";
+
+// Use a raw video file as fake video capture device.
+const char kUseFileForFakeVideoCapture[] = "use-file-for-fake-video-capture";
+
} // namespace switches
diff --git a/chromium/media/base/media_switches.h b/chromium/media/base/media_switches.h
index 963a351cba8..0c7fa245c84 100644
--- a/chromium/media/base/media_switches.h
+++ b/chromium/media/base/media_switches.h
@@ -16,19 +16,16 @@ MEDIA_EXPORT extern const char kAudioBufferSize[];
MEDIA_EXPORT extern const char kEnableEac3Playback[];
-MEDIA_EXPORT extern const char kEnableOpusPlayback[];
+MEDIA_EXPORT extern const char kDisableOpusPlayback[];
MEDIA_EXPORT extern const char kDisableVp8AlphaPlayback[];
MEDIA_EXPORT extern const char kVideoThreads[];
-MEDIA_EXPORT extern const char kOverrideEncryptedMediaCanPlayType[];
-
MEDIA_EXPORT extern const char kEnableMP3StreamParser[];
#if defined(OS_ANDROID)
MEDIA_EXPORT extern const char kDisableInfobarForProtectedMediaIdentifier[];
-MEDIA_EXPORT extern const char kEnableMediaDrm[];
MEDIA_EXPORT extern const char kMediaDrmEnableNonCompositing[];
#endif
@@ -43,6 +40,7 @@ MEDIA_EXPORT extern const char kAlsaOutputDevice[];
#if defined(OS_MACOSX)
MEDIA_EXPORT extern const char kDisableMainThreadAudio[];
+MEDIA_EXPORT extern const char kEnableAVFoundation[];
#endif
#if defined(OS_WIN)
@@ -57,6 +55,10 @@ MEDIA_EXPORT extern const char kWaveOutBuffers[];
MEDIA_EXPORT extern const char kUseCras[];
#endif
+MEDIA_EXPORT extern const char kDisableSystemSoundsManager[];
+
+MEDIA_EXPORT extern const char kUseFileForFakeVideoCapture[];
+
} // namespace switches
#endif // MEDIA_BASE_MEDIA_SWITCHES_H_
diff --git a/chromium/media/base/mock_demuxer_host.h b/chromium/media/base/mock_demuxer_host.h
index 597c13298c5..61761a84b95 100644
--- a/chromium/media/base/mock_demuxer_host.h
+++ b/chromium/media/base/mock_demuxer_host.h
@@ -5,9 +5,8 @@
#ifndef MEDIA_BASE_MOCK_DEMUXER_HOST_H_
#define MEDIA_BASE_MOCK_DEMUXER_HOST_H_
-#include <string>
-
#include "media/base/demuxer.h"
+#include "media/base/text_track_config.h"
#include "testing/gmock/include/gmock/gmock.h"
namespace media {
@@ -26,6 +25,9 @@ class MockDemuxerHost : public DemuxerHost {
// DemuxerHost implementation.
MOCK_METHOD1(OnDemuxerError, void(PipelineStatus error));
MOCK_METHOD1(SetDuration, void(base::TimeDelta duration));
+ MOCK_METHOD2(AddTextStream, void(DemuxerStream*,
+ const TextTrackConfig&));
+ MOCK_METHOD1(RemoveTextStream, void(DemuxerStream*));
private:
DISALLOW_COPY_AND_ASSIGN(MockDemuxerHost);
diff --git a/chromium/media/base/mock_filters.cc b/chromium/media/base/mock_filters.cc
index eaf52013cda..e4faf70b3ee 100644
--- a/chromium/media/base/mock_filters.cc
+++ b/chromium/media/base/mock_filters.cc
@@ -66,6 +66,10 @@ MockAudioRenderer::MockAudioRenderer() {}
MockAudioRenderer::~MockAudioRenderer() {}
+MockTextTrack::MockTextTrack() {}
+
+MockTextTrack::~MockTextTrack() {}
+
MockDecryptor::MockDecryptor() {}
MockDecryptor::~MockDecryptor() {}
diff --git a/chromium/media/base/mock_filters.h b/chromium/media/base/mock_filters.h
index fb5e8a0dfd8..c71590da1d1 100644
--- a/chromium/media/base/mock_filters.h
+++ b/chromium/media/base/mock_filters.h
@@ -16,6 +16,7 @@
#include "media/base/demuxer.h"
#include "media/base/filter_collection.h"
#include "media/base/pipeline_status.h"
+#include "media/base/text_track.h"
#include "media/base/video_decoder.h"
#include "media/base/video_decoder_config.h"
#include "media/base/video_frame.h"
@@ -30,7 +31,8 @@ class MockDemuxer : public Demuxer {
virtual ~MockDemuxer();
// Demuxer implementation.
- MOCK_METHOD2(Initialize, void(DemuxerHost* host, const PipelineStatusCB& cb));
+ MOCK_METHOD3(Initialize,
+ void(DemuxerHost* host, const PipelineStatusCB& cb, bool));
MOCK_METHOD1(SetPlaybackRate, void(float playback_rate));
MOCK_METHOD2(Seek, void(base::TimeDelta time, const PipelineStatusCB& cb));
MOCK_METHOD1(Stop, void(const base::Closure& callback));
@@ -155,6 +157,21 @@ class MockAudioRenderer : public AudioRenderer {
DISALLOW_COPY_AND_ASSIGN(MockAudioRenderer);
};
+class MockTextTrack : public TextTrack {
+ public:
+ MockTextTrack();
+ virtual ~MockTextTrack();
+
+ MOCK_METHOD5(addWebVTTCue, void(const base::TimeDelta& start,
+ const base::TimeDelta& end,
+ const std::string& id,
+ const std::string& content,
+ const std::string& settings));
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MockTextTrack);
+};
+
class MockDecryptor : public Decryptor {
public:
MockDecryptor();
diff --git a/chromium/media/base/pipeline.cc b/chromium/media/base/pipeline.cc
index 9790c61cb5c..5799dc3f410 100644
--- a/chromium/media/base/pipeline.cc
+++ b/chromium/media/base/pipeline.cc
@@ -21,6 +21,8 @@
#include "media/base/clock.h"
#include "media/base/filter_collection.h"
#include "media/base/media_log.h"
+#include "media/base/text_renderer.h"
+#include "media/base/text_track_config.h"
#include "media/base/video_decoder.h"
#include "media/base/video_decoder_config.h"
#include "media/base/video_renderer.h"
@@ -47,6 +49,7 @@ Pipeline::Pipeline(const scoped_refptr<base::MessageLoopProxy>& message_loop,
state_(kCreated),
audio_ended_(false),
video_ended_(false),
+ text_ended_(false),
audio_disabled_(false),
demuxer_(NULL),
creation_time_(default_tick_clock_.NowTicks()) {
@@ -293,6 +296,19 @@ void Pipeline::OnDemuxerError(PipelineStatus error) {
SetError(error);
}
+void Pipeline::AddTextStream(DemuxerStream* text_stream,
+ const TextTrackConfig& config) {
+ message_loop_->PostTask(FROM_HERE, base::Bind(
+ &Pipeline::AddTextStreamTask, base::Unretained(this),
+ text_stream, config));
+}
+
+void Pipeline::RemoveTextStream(DemuxerStream* text_stream) {
+ message_loop_->PostTask(FROM_HERE, base::Bind(
+ &Pipeline::RemoveTextStreamTask, base::Unretained(this),
+ text_stream));
+}
+
void Pipeline::SetError(PipelineStatus error) {
DCHECK(IsRunning());
DCHECK_NE(PIPELINE_OK, error);
@@ -378,7 +394,11 @@ void Pipeline::SetTotalBytes(int64 total_bytes) {
TimeDelta Pipeline::TimeForByteOffset_Locked(int64 byte_offset) const {
lock_.AssertAcquired();
- TimeDelta time_offset = byte_offset * clock_->Duration() / total_bytes_;
+ // Use floating point to avoid potential overflow when using 64 bit integers.
+ double time_offset_in_ms = clock_->Duration().InMilliseconds() *
+ (static_cast<double>(byte_offset) / total_bytes_);
+ TimeDelta time_offset(TimeDelta::FromMilliseconds(
+ static_cast<int64>(time_offset_in_ms)));
// Since the byte->time calculation is approximate, fudge the beginning &
// ending areas to look better.
TimeDelta epsilon = clock_->Duration() / 100;
@@ -537,6 +557,10 @@ void Pipeline::DoSeek(
bound_fns.Push(base::Bind(
&VideoRenderer::Pause, base::Unretained(video_renderer_.get())));
}
+ if (text_renderer_) {
+ bound_fns.Push(base::Bind(
+ &TextRenderer::Pause, base::Unretained(text_renderer_.get())));
+ }
// Flush.
if (audio_renderer_) {
@@ -547,6 +571,10 @@ void Pipeline::DoSeek(
bound_fns.Push(base::Bind(
&VideoRenderer::Flush, base::Unretained(video_renderer_.get())));
}
+ if (text_renderer_) {
+ bound_fns.Push(base::Bind(
+ &TextRenderer::Flush, base::Unretained(text_renderer_.get())));
+ }
// Seek demuxer.
bound_fns.Push(base::Bind(
@@ -586,6 +614,11 @@ void Pipeline::DoPlay(const PipelineStatusCB& done_cb) {
&VideoRenderer::Play, base::Unretained(video_renderer_.get())));
}
+ if (text_renderer_) {
+ bound_fns.Push(base::Bind(
+ &TextRenderer::Play, base::Unretained(text_renderer_.get())));
+ }
+
pending_callbacks_ = SerialRunner::Run(bound_fns, done_cb);
}
@@ -609,6 +642,11 @@ void Pipeline::DoStop(const PipelineStatusCB& done_cb) {
&VideoRenderer::Stop, base::Unretained(video_renderer_.get())));
}
+ if (text_renderer_) {
+ bound_fns.Push(base::Bind(
+ &TextRenderer::Stop, base::Unretained(text_renderer_.get())));
+ }
+
pending_callbacks_ = SerialRunner::Run(bound_fns, done_cb);
}
@@ -625,6 +663,7 @@ void Pipeline::OnStopCompleted(PipelineStatus status) {
filter_collection_.reset();
audio_renderer_.reset();
video_renderer_.reset();
+ text_renderer_.reset();
demuxer_ = NULL;
// If we stop during initialization/seeking we want to run |seek_cb_|
@@ -685,6 +724,13 @@ void Pipeline::OnVideoRendererEnded() {
media_log_->AddEvent(media_log_->CreateEvent(MediaLogEvent::VIDEO_ENDED));
}
+void Pipeline::OnTextRendererEnded() {
+ // Force post to process ended messages after current execution frame.
+ message_loop_->PostTask(FROM_HERE, base::Bind(
+ &Pipeline::DoTextRendererEnded, base::Unretained(this)));
+ media_log_->AddEvent(media_log_->CreateEvent(MediaLogEvent::TEXT_ENDED));
+}
+
// Called from any thread.
void Pipeline::OnUpdateStatistics(const PipelineStatistics& stats) {
base::AutoLock auto_lock(lock_);
@@ -711,6 +757,13 @@ void Pipeline::StartTask(scoped_ptr<FilterCollection> filter_collection,
buffering_state_cb_ = buffering_state_cb;
duration_change_cb_ = duration_change_cb;
+ text_renderer_ = filter_collection_->GetTextRenderer();
+
+ if (text_renderer_) {
+ text_renderer_->Initialize(
+ base::Bind(&Pipeline::OnTextRendererEnded, base::Unretained(this)));
+ }
+
StateTransitionTask(PIPELINE_OK);
}
@@ -760,8 +813,6 @@ void Pipeline::PlaybackRateChangedTask(float playback_rate) {
clock_->SetPlaybackRate(playback_rate);
}
- if (demuxer_)
- demuxer_->SetPlaybackRate(playback_rate);
if (audio_renderer_)
audio_renderer_->SetPlaybackRate(playback_rate_);
if (video_renderer_)
@@ -802,6 +853,7 @@ void Pipeline::SeekTask(TimeDelta time, const PipelineStatusCB& seek_cb) {
seek_cb_ = seek_cb;
audio_ended_ = false;
video_ended_ = false;
+ text_ended_ = false;
// Kick off seeking!
{
@@ -845,6 +897,18 @@ void Pipeline::DoVideoRendererEnded() {
RunEndedCallbackIfNeeded();
}
+void Pipeline::DoTextRendererEnded() {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+
+ if (state_ != kStarted)
+ return;
+
+ DCHECK(!text_ended_);
+ text_ended_ = true;
+
+ RunEndedCallbackIfNeeded();
+}
+
void Pipeline::RunEndedCallbackIfNeeded() {
DCHECK(message_loop_->BelongsToCurrentThread());
@@ -854,6 +918,9 @@ void Pipeline::RunEndedCallbackIfNeeded() {
if (video_renderer_ && !video_ended_)
return;
+ if (text_renderer_ && text_renderer_->HasTracks() && !text_ended_)
+ return;
+
{
base::AutoLock auto_lock(lock_);
clock_->EndOfStream();
@@ -878,11 +945,24 @@ void Pipeline::AudioDisabledTask() {
StartClockIfWaitingForTimeUpdate_Locked();
}
+void Pipeline::AddTextStreamTask(DemuxerStream* text_stream,
+ const TextTrackConfig& config) {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+ // TODO(matthewjheaney): fix up text_ended_ when text stream
+ // is added (http://crbug.com/321446).
+ text_renderer_->AddTextStream(text_stream, config);
+}
+
+void Pipeline::RemoveTextStreamTask(DemuxerStream* text_stream) {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+ text_renderer_->RemoveTextStream(text_stream);
+}
+
void Pipeline::InitializeDemuxer(const PipelineStatusCB& done_cb) {
DCHECK(message_loop_->BelongsToCurrentThread());
demuxer_ = filter_collection_->GetDemuxer();
- demuxer_->Initialize(this, done_cb);
+ demuxer_->Initialize(this, done_cb, text_renderer_);
}
void Pipeline::InitializeAudioRenderer(const PipelineStatusCB& done_cb) {
diff --git a/chromium/media/base/pipeline.h b/chromium/media/base/pipeline.h
index 09ff9041639..222091fcdbf 100644
--- a/chromium/media/base/pipeline.h
+++ b/chromium/media/base/pipeline.h
@@ -30,6 +30,8 @@ namespace media {
class Clock;
class FilterCollection;
class MediaLog;
+class TextRenderer;
+class TextTrackConfig;
class VideoRenderer;
// Pipeline runs the media pipeline. Filters are created and called on the
@@ -232,6 +234,9 @@ class MEDIA_EXPORT Pipeline : public DemuxerHost {
// DemuxerHost implementaion.
virtual void SetDuration(base::TimeDelta duration) OVERRIDE;
virtual void OnDemuxerError(PipelineStatus error) OVERRIDE;
+ virtual void AddTextStream(DemuxerStream* text_stream,
+ const TextTrackConfig& config) OVERRIDE;
+ virtual void RemoveTextStream(DemuxerStream* text_stream) OVERRIDE;
// Initiates teardown sequence in response to a runtime error.
//
@@ -244,6 +249,7 @@ class MEDIA_EXPORT Pipeline : public DemuxerHost {
// Callbacks executed when a renderer has ended.
void OnAudioRendererEnded();
void OnVideoRendererEnded();
+ void OnTextRendererEnded();
// Callback executed by filters to update statistics.
void OnUpdateStatistics(const PipelineStatistics& stats);
@@ -283,14 +289,22 @@ class MEDIA_EXPORT Pipeline : public DemuxerHost {
// Carries out notifying filters that we are seeking to a new timestamp.
void SeekTask(base::TimeDelta time, const PipelineStatusCB& seek_cb);
- // Handles audio/video ended logic and running |ended_cb_|.
+ // Handles audio/video/text ended logic and running |ended_cb_|.
void DoAudioRendererEnded();
void DoVideoRendererEnded();
+ void DoTextRendererEnded();
void RunEndedCallbackIfNeeded();
// Carries out disabling the audio renderer.
void AudioDisabledTask();
+ // Carries out adding a new text stream to the text renderer.
+ void AddTextStreamTask(DemuxerStream* text_stream,
+ const TextTrackConfig& config);
+
+ // Carries out removing a text stream from the text renderer.
+ void RemoveTextStreamTask(DemuxerStream* text_stream);
+
// Kicks off initialization for each media object, executing |done_cb| with
// the result when completed.
void InitializeDemuxer(const PipelineStatusCB& done_cb);
@@ -392,7 +406,7 @@ class MEDIA_EXPORT Pipeline : public DemuxerHost {
// reset the pipeline state, and restore this to PIPELINE_OK.
PipelineStatus status_;
- // Whether the media contains rendered audio and video streams.
+ // Whether the media contains rendered audio or video streams.
// TODO(fischman,scherkus): replace these with checks for
// {audio,video}_decoder_ once extraction of {Audio,Video}Decoder from the
// Filter heirarchy is done.
@@ -405,9 +419,10 @@ class MEDIA_EXPORT Pipeline : public DemuxerHost {
// Member that tracks the current state.
State state_;
- // Whether we've received the audio/video ended events.
+ // Whether we've received the audio/video/text ended events.
bool audio_ended_;
bool video_ended_;
+ bool text_ended_;
// Set to true in DisableAudioRendererTask().
bool audio_disabled_;
@@ -434,6 +449,7 @@ class MEDIA_EXPORT Pipeline : public DemuxerHost {
// playback rate, and determining when playback has finished.
scoped_ptr<AudioRenderer> audio_renderer_;
scoped_ptr<VideoRenderer> video_renderer_;
+ scoped_ptr<TextRenderer> text_renderer_;
PipelineStatistics statistics_;
diff --git a/chromium/media/base/pipeline_status.cc b/chromium/media/base/pipeline_status.cc
deleted file mode 100644
index 6c08383cdc9..00000000000
--- a/chromium/media/base/pipeline_status.cc
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/pipeline_status.h"
-
-#include "base/bind.h"
-#include "base/metrics/histogram.h"
-
-namespace media {
-
-static void ReportAndRun(const std::string& name,
- const PipelineStatusCB& cb,
- PipelineStatus status) {
- UMA_HISTOGRAM_ENUMERATION(name, status, PIPELINE_STATUS_MAX);
- cb.Run(status);
-}
-
-PipelineStatusCB CreateUMAReportingPipelineCB(const std::string& name,
- const PipelineStatusCB& cb) {
- return base::Bind(&ReportAndRun, name, cb);
-}
-
-} // namespace media
diff --git a/chromium/media/base/pipeline_status.h b/chromium/media/base/pipeline_status.h
index c208d01d583..a9f8585f573 100644
--- a/chromium/media/base/pipeline_status.h
+++ b/chromium/media/base/pipeline_status.h
@@ -37,11 +37,6 @@ enum PipelineStatus {
typedef base::Callback<void(PipelineStatus)> PipelineStatusCB;
-// Wrap & return a callback around |cb| which reports its argument to UMA under
-// the requested |name|.
-PipelineStatusCB CreateUMAReportingPipelineCB(const std::string& name,
- const PipelineStatusCB& cb);
-
// TODO(scherkus): this should be moved alongside host interface definitions.
struct PipelineStatistics {
PipelineStatistics()
diff --git a/chromium/media/base/pipeline_unittest.cc b/chromium/media/base/pipeline_unittest.cc
index 4c8640c7807..a7a8cae316c 100644
--- a/chromium/media/base/pipeline_unittest.cc
+++ b/chromium/media/base/pipeline_unittest.cc
@@ -11,11 +11,14 @@
#include "base/threading/simple_thread.h"
#include "base/time/clock.h"
#include "media/base/clock.h"
+#include "media/base/fake_text_track_stream.h"
#include "media/base/gmock_callback_support.h"
#include "media/base/media_log.h"
#include "media/base/mock_filters.h"
#include "media/base/pipeline.h"
#include "media/base/test_helpers.h"
+#include "media/base/text_renderer.h"
+#include "media/base/text_track_config.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "ui/gfx/size.h"
@@ -36,8 +39,7 @@ using ::testing::WithArg;
namespace media {
// Demuxer properties.
-static const int kTotalBytes = 1024;
-static const int kBitrate = 1234;
+const int kTotalBytes = 1024;
ACTION_P(SetDemuxerProperties, duration) {
arg0->SetTotalBytes(kTotalBytes);
@@ -94,6 +96,13 @@ class PipelineTest : public ::testing::Test {
scoped_ptr<AudioRenderer> audio_renderer(audio_renderer_);
filter_collection_->SetAudioRenderer(audio_renderer.Pass());
+ text_renderer_ = new TextRenderer(
+ message_loop_.message_loop_proxy(),
+ base::Bind(&PipelineTest::OnAddTextTrack,
+ base::Unretained(this)));
+ scoped_ptr<TextRenderer> text_renderer(text_renderer_);
+ filter_collection_->SetTextRenderer(text_renderer.Pass());
+
// InitializeDemuxer() adds overriding expectations for expected non-NULL
// streams.
DemuxerStream* null_pointer = NULL;
@@ -110,6 +119,13 @@ class PipelineTest : public ::testing::Test {
ExpectStop();
+ // The mock demuxer doesn't stop the fake text track stream,
+ // so just stop it manually.
+ if (text_stream_) {
+ text_stream_->Stop();
+ message_loop_.RunUntilIdle();
+ }
+
// Expect a stop callback if we were started.
EXPECT_CALL(callbacks_, OnStop());
pipeline_->Stop(base::Bind(&CallbackHelper::OnStop,
@@ -123,7 +139,7 @@ class PipelineTest : public ::testing::Test {
void InitializeDemuxer(MockDemuxerStreamVector* streams,
const base::TimeDelta& duration) {
EXPECT_CALL(callbacks_, OnDurationChange());
- EXPECT_CALL(*demuxer_, Initialize(_, _))
+ EXPECT_CALL(*demuxer_, Initialize(_, _, _))
.WillOnce(DoAll(SetDemuxerProperties(duration),
RunCallback<1>(PIPELINE_OK)));
@@ -174,6 +190,13 @@ class PipelineTest : public ::testing::Test {
}
}
+ void AddTextStream() {
+ EXPECT_CALL(*this, OnAddTextTrack(_,_))
+ .WillOnce(Invoke(this, &PipelineTest::DoOnAddTextTrack));
+ static_cast<DemuxerHost*>(pipeline_.get())->AddTextStream(text_stream(),
+ TextTrackConfig(kTextSubtitles, "", "", ""));
+ }
+
// Sets up expectations on the callback and initializes the pipeline. Called
// after tests have set expectations any filters they wish to use.
void InitializePipeline(PipelineStatus start_status) {
@@ -181,7 +204,6 @@ class PipelineTest : public ::testing::Test {
if (start_status == PIPELINE_OK) {
EXPECT_CALL(callbacks_, OnBufferingState(Pipeline::kHaveMetadata));
- EXPECT_CALL(*demuxer_, SetPlaybackRate(0.0f));
if (audio_stream_) {
EXPECT_CALL(*audio_renderer_, SetPlaybackRate(0.0f));
@@ -217,6 +239,11 @@ class PipelineTest : public ::testing::Test {
video_stream_->set_video_decoder_config(video_decoder_config_);
}
+ void CreateTextStream() {
+ scoped_ptr<FakeTextTrackStream> text_stream(new FakeTextTrackStream);
+ text_stream_ = text_stream.Pass();
+ }
+
MockDemuxerStream* audio_stream() {
return audio_stream_.get();
}
@@ -225,11 +252,14 @@ class PipelineTest : public ::testing::Test {
return video_stream_.get();
}
+ FakeTextTrackStream* text_stream() {
+ return text_stream_.get();
+ }
+
void ExpectSeek(const base::TimeDelta& seek_time) {
// Every filter should receive a call to Seek().
EXPECT_CALL(*demuxer_, Seek(seek_time, _))
.WillOnce(RunCallback<1>(PIPELINE_OK));
- EXPECT_CALL(*demuxer_, SetPlaybackRate(_));
if (audio_stream_) {
EXPECT_CALL(*audio_renderer_, Pause(_))
@@ -284,6 +314,15 @@ class PipelineTest : public ::testing::Test {
EXPECT_CALL(*video_renderer_, Stop(_)).WillOnce(RunClosure<0>());
}
+ MOCK_METHOD2(OnAddTextTrack, void(const TextTrackConfig&,
+ const AddTextTrackDoneCB&));
+
+ void DoOnAddTextTrack(const TextTrackConfig& config,
+ const AddTextTrackDoneCB& done_cb) {
+ scoped_ptr<TextTrack> text_track(new MockTextTrack);
+ done_cb.Run(text_track.Pass());
+ }
+
// Fixture members.
StrictMock<CallbackHelper> callbacks_;
base::SimpleTestTickClock test_tick_clock_;
@@ -294,8 +333,11 @@ class PipelineTest : public ::testing::Test {
scoped_ptr<MockDemuxer> demuxer_;
MockVideoRenderer* video_renderer_;
MockAudioRenderer* audio_renderer_;
+ StrictMock<CallbackHelper> text_renderer_callbacks_;
+ TextRenderer* text_renderer_;
scoped_ptr<StrictMock<MockDemuxerStream> > audio_stream_;
scoped_ptr<StrictMock<MockDemuxerStream> > video_stream_;
+ scoped_ptr<FakeTextTrackStream> text_stream_;
AudioRenderer::TimeCB audio_time_cb_;
VideoDecoderConfig video_decoder_config_;
@@ -341,7 +383,7 @@ TEST_F(PipelineTest, NotStarted) {
TEST_F(PipelineTest, NeverInitializes) {
// Don't execute the callback passed into Initialize().
- EXPECT_CALL(*demuxer_, Initialize(_, _));
+ EXPECT_CALL(*demuxer_, Initialize(_, _, _));
// This test hangs during initialization by never calling
// InitializationComplete(). StrictMock<> will ensure that the callback is
@@ -366,7 +408,7 @@ TEST_F(PipelineTest, NeverInitializes) {
}
TEST_F(PipelineTest, URLNotFound) {
- EXPECT_CALL(*demuxer_, Initialize(_, _))
+ EXPECT_CALL(*demuxer_, Initialize(_, _, _))
.WillOnce(RunCallback<1>(PIPELINE_ERROR_URL_NOT_FOUND));
EXPECT_CALL(*demuxer_, Stop(_))
.WillOnce(RunClosure<0>());
@@ -375,7 +417,7 @@ TEST_F(PipelineTest, URLNotFound) {
}
TEST_F(PipelineTest, NoStreams) {
- EXPECT_CALL(*demuxer_, Initialize(_, _))
+ EXPECT_CALL(*demuxer_, Initialize(_, _, _))
.WillOnce(RunCallback<1>(PIPELINE_OK));
EXPECT_CALL(*demuxer_, Stop(_))
.WillOnce(RunClosure<0>());
@@ -425,9 +467,47 @@ TEST_F(PipelineTest, AudioVideoStream) {
EXPECT_TRUE(pipeline_->HasVideo());
}
+TEST_F(PipelineTest, VideoTextStream) {
+ CreateVideoStream();
+ CreateTextStream();
+ MockDemuxerStreamVector streams;
+ streams.push_back(video_stream());
+
+ InitializeDemuxer(&streams);
+ InitializeVideoRenderer(video_stream());
+
+ InitializePipeline(PIPELINE_OK);
+ EXPECT_FALSE(pipeline_->HasAudio());
+ EXPECT_TRUE(pipeline_->HasVideo());
+
+ AddTextStream();
+ message_loop_.RunUntilIdle();
+}
+
+TEST_F(PipelineTest, VideoAudioTextStream) {
+ CreateVideoStream();
+ CreateAudioStream();
+ CreateTextStream();
+ MockDemuxerStreamVector streams;
+ streams.push_back(video_stream());
+ streams.push_back(audio_stream());
+
+ InitializeDemuxer(&streams);
+ InitializeVideoRenderer(video_stream());
+ InitializeAudioRenderer(audio_stream(), false);
+
+ InitializePipeline(PIPELINE_OK);
+ EXPECT_TRUE(pipeline_->HasAudio());
+ EXPECT_TRUE(pipeline_->HasVideo());
+
+ AddTextStream();
+ message_loop_.RunUntilIdle();
+}
+
TEST_F(PipelineTest, Seek) {
CreateAudioStream();
CreateVideoStream();
+ CreateTextStream();
MockDemuxerStreamVector streams;
streams.push_back(audio_stream());
streams.push_back(video_stream());
@@ -439,6 +519,9 @@ TEST_F(PipelineTest, Seek) {
// Initialize then seek!
InitializePipeline(PIPELINE_OK);
+ AddTextStream();
+ message_loop_.RunUntilIdle();
+
// Every filter should receive a call to Seek().
base::TimeDelta expected = base::TimeDelta::FromSeconds(2000);
ExpectSeek(expected);
@@ -577,6 +660,7 @@ TEST_F(PipelineTest, DisableAudioRendererDuringInit) {
TEST_F(PipelineTest, EndedCallback) {
CreateAudioStream();
CreateVideoStream();
+ CreateTextStream();
MockDemuxerStreamVector streams;
streams.push_back(audio_stream());
streams.push_back(video_stream());
@@ -586,13 +670,18 @@ TEST_F(PipelineTest, EndedCallback) {
InitializeVideoRenderer(video_stream());
InitializePipeline(PIPELINE_OK);
- // The ended callback shouldn't run until both renderers have ended.
+ AddTextStream();
+
+ // The ended callback shouldn't run until all renderers have ended.
pipeline_->OnAudioRendererEnded();
message_loop_.RunUntilIdle();
- EXPECT_CALL(callbacks_, OnEnded());
pipeline_->OnVideoRendererEnded();
message_loop_.RunUntilIdle();
+
+ EXPECT_CALL(callbacks_, OnEnded());
+ text_stream()->SendEosNotification();
+ message_loop_.RunUntilIdle();
}
TEST_F(PipelineTest, AudioStreamShorterThanVideo) {
@@ -616,7 +705,6 @@ TEST_F(PipelineTest, AudioStreamShorterThanVideo) {
EXPECT_EQ(0, pipeline_->GetMediaTime().ToInternalValue());
float playback_rate = 1.0f;
- EXPECT_CALL(*demuxer_, SetPlaybackRate(playback_rate));
EXPECT_CALL(*video_renderer_, SetPlaybackRate(playback_rate));
EXPECT_CALL(*audio_renderer_, SetPlaybackRate(playback_rate));
pipeline_->SetPlaybackRate(playback_rate);
@@ -654,7 +742,6 @@ TEST_F(PipelineTest, ErrorDuringSeek) {
InitializePipeline(PIPELINE_OK);
float playback_rate = 1.0f;
- EXPECT_CALL(*demuxer_, SetPlaybackRate(playback_rate));
EXPECT_CALL(*audio_renderer_, SetPlaybackRate(playback_rate));
pipeline_->SetPlaybackRate(playback_rate);
message_loop_.RunUntilIdle();
@@ -789,7 +876,6 @@ TEST_F(PipelineTest, AudioTimeUpdateDuringSeek) {
InitializePipeline(PIPELINE_OK);
float playback_rate = 1.0f;
- EXPECT_CALL(*demuxer_, SetPlaybackRate(playback_rate));
EXPECT_CALL(*audio_renderer_, SetPlaybackRate(playback_rate));
pipeline_->SetPlaybackRate(playback_rate);
message_loop_.RunUntilIdle();
@@ -815,7 +901,6 @@ TEST_F(PipelineTest, AudioTimeUpdateDuringSeek) {
.WillOnce(RunClosure<0>());
EXPECT_CALL(*audio_renderer_, Preroll(seek_time, _))
.WillOnce(RunCallback<1>(PIPELINE_OK));
- EXPECT_CALL(*demuxer_, SetPlaybackRate(_));
EXPECT_CALL(*audio_renderer_, SetPlaybackRate(_));
EXPECT_CALL(*audio_renderer_, SetVolume(_));
EXPECT_CALL(*audio_renderer_, Play(_))
@@ -930,13 +1015,13 @@ class PipelineTeardownTest : public PipelineTest {
if (state == kInitDemuxer) {
if (stop_or_error == kStop) {
- EXPECT_CALL(*demuxer_, Initialize(_, _))
+ EXPECT_CALL(*demuxer_, Initialize(_, _, _))
.WillOnce(DoAll(Stop(pipeline_.get(), stop_cb),
RunCallback<1>(PIPELINE_OK)));
EXPECT_CALL(callbacks_, OnStop());
} else {
status = DEMUXER_ERROR_COULD_NOT_OPEN;
- EXPECT_CALL(*demuxer_, Initialize(_, _))
+ EXPECT_CALL(*demuxer_, Initialize(_, _, _))
.WillOnce(RunCallback<1>(status));
}
@@ -1000,7 +1085,6 @@ class PipelineTeardownTest : public PipelineTest {
EXPECT_CALL(*video_renderer_, Preroll(base::TimeDelta(), _))
.WillOnce(RunCallback<1>(PIPELINE_OK));
- EXPECT_CALL(*demuxer_, SetPlaybackRate(0.0f));
EXPECT_CALL(*audio_renderer_, SetPlaybackRate(0.0f));
EXPECT_CALL(*video_renderer_, SetPlaybackRate(0.0f));
EXPECT_CALL(*audio_renderer_, SetVolume(1.0f));
@@ -1109,7 +1193,6 @@ class PipelineTeardownTest : public PipelineTest {
.WillOnce(RunCallback<1>(PIPELINE_OK));
// Playback rate and volume are updated prior to starting.
- EXPECT_CALL(*demuxer_, SetPlaybackRate(0.0f));
EXPECT_CALL(*audio_renderer_, SetPlaybackRate(0.0f));
EXPECT_CALL(*video_renderer_, SetPlaybackRate(0.0f));
EXPECT_CALL(*audio_renderer_, SetVolume(1.0f));
diff --git a/chromium/media/base/run_all_unittests.cc b/chromium/media/base/run_all_unittests.cc
index 1c4da930470..f1a0092814c 100644
--- a/chromium/media/base/run_all_unittests.cc
+++ b/chromium/media/base/run_all_unittests.cc
@@ -4,8 +4,8 @@
#include "base/bind.h"
#include "base/command_line.h"
+#include "base/test/launcher/unit_test_launcher.h"
#include "base/test/test_suite.h"
-#include "base/test/unit_test_launcher.h"
#include "build/build_config.h"
#include "media/base/media.h"
#include "media/base/media_switches.h"
@@ -41,10 +41,6 @@ void TestSuiteNoAtExit::Initialize() {
media::InitializeMediaLibraryForTesting();
CommandLine* cmd_line = CommandLine::ForCurrentProcess();
cmd_line->AppendSwitch(switches::kEnableMP3StreamParser);
-
- // Enable Opus support for all media tests.
- // TODO(vigneshv): Remove this once the Opus flag is removed or negated.
- cmd_line->AppendSwitch(switches::kEnableOpusPlayback);
}
int main(int argc, char** argv) {
diff --git a/chromium/media/base/simd/vector_math_sse.cc b/chromium/media/base/simd/vector_math_sse.cc
index 39bcaa0c195..c2121225cd6 100644
--- a/chromium/media/base/simd/vector_math_sse.cc
+++ b/chromium/media/base/simd/vector_math_sse.cc
@@ -4,6 +4,8 @@
#include "media/base/vector_math_testing.h"
+#include <algorithm>
+
#include <xmmintrin.h> // NOLINT
namespace media {
@@ -35,5 +37,82 @@ void FMAC_SSE(const float src[], float scale, int len, float dest[]) {
dest[i] += src[i] * scale;
}
+// Convenience macro to extract float 0 through 3 from the vector |a|. This is
+// needed because compilers other than clang don't support access via
+// operator[]().
+#define EXTRACT_FLOAT(a, i) \
+ (i == 0 ? \
+ _mm_cvtss_f32(a) : \
+ _mm_cvtss_f32(_mm_shuffle_ps(a, a, i)))
+
+std::pair<float, float> EWMAAndMaxPower_SSE(
+ float initial_value, const float src[], int len, float smoothing_factor) {
+ // When the recurrence is unrolled, we see that we can split it into 4
+ // separate lanes of evaluation:
+ //
+ // y[n] = a(S[n]^2) + (1-a)(y[n-1])
+ // = a(S[n]^2) + (1-a)^1(aS[n-1]^2) + (1-a)^2(aS[n-2]^2) + ...
+ // = z[n] + (1-a)^1(z[n-1]) + (1-a)^2(z[n-2]) + (1-a)^3(z[n-3])
+ //
+ // where z[n] = a(S[n]^2) + (1-a)^4(z[n-4]) + (1-a)^8(z[n-8]) + ...
+ //
+ // Thus, the strategy here is to compute z[n], z[n-1], z[n-2], and z[n-3] in
+ // each of the 4 lanes, and then combine them to give y[n].
+
+ const int rem = len % 4;
+ const int last_index = len - rem;
+
+ const __m128 smoothing_factor_x4 = _mm_set_ps1(smoothing_factor);
+ const float weight_prev = 1.0f - smoothing_factor;
+ const __m128 weight_prev_x4 = _mm_set_ps1(weight_prev);
+ const __m128 weight_prev_squared_x4 =
+ _mm_mul_ps(weight_prev_x4, weight_prev_x4);
+ const __m128 weight_prev_4th_x4 =
+ _mm_mul_ps(weight_prev_squared_x4, weight_prev_squared_x4);
+
+ // Compute z[n], z[n-1], z[n-2], and z[n-3] in parallel in lanes 3, 2, 1 and
+ // 0, respectively.
+ __m128 max_x4 = _mm_setzero_ps();
+ __m128 ewma_x4 = _mm_setr_ps(0.0f, 0.0f, 0.0f, initial_value);
+ int i;
+ for (i = 0; i < last_index; i += 4) {
+ ewma_x4 = _mm_mul_ps(ewma_x4, weight_prev_4th_x4);
+ const __m128 sample_x4 = _mm_load_ps(src + i);
+ const __m128 sample_squared_x4 = _mm_mul_ps(sample_x4, sample_x4);
+ max_x4 = _mm_max_ps(max_x4, sample_squared_x4);
+ // Note: The compiler optimizes this to a single multiply-and-accumulate
+ // instruction:
+ ewma_x4 = _mm_add_ps(ewma_x4,
+ _mm_mul_ps(sample_squared_x4, smoothing_factor_x4));
+ }
+
+ // y[n] = z[n] + (1-a)^1(z[n-1]) + (1-a)^2(z[n-2]) + (1-a)^3(z[n-3])
+ float ewma = EXTRACT_FLOAT(ewma_x4, 3);
+ ewma_x4 = _mm_mul_ps(ewma_x4, weight_prev_x4);
+ ewma += EXTRACT_FLOAT(ewma_x4, 2);
+ ewma_x4 = _mm_mul_ps(ewma_x4, weight_prev_x4);
+ ewma += EXTRACT_FLOAT(ewma_x4, 1);
+ ewma_x4 = _mm_mul_ss(ewma_x4, weight_prev_x4);
+ ewma += EXTRACT_FLOAT(ewma_x4, 0);
+
+ // Fold the maximums together to get the overall maximum.
+ max_x4 = _mm_max_ps(max_x4,
+ _mm_shuffle_ps(max_x4, max_x4, _MM_SHUFFLE(3, 3, 1, 1)));
+ max_x4 = _mm_max_ss(max_x4, _mm_shuffle_ps(max_x4, max_x4, 2));
+
+ std::pair<float, float> result(ewma, EXTRACT_FLOAT(max_x4, 0));
+
+ // Handle remaining values at the end of |src|.
+ for (; i < len; ++i) {
+ result.first *= weight_prev;
+ const float sample = src[i];
+ const float sample_squared = sample * sample;
+ result.first += sample_squared * smoothing_factor;
+ result.second = std::max(result.second, sample_squared);
+ }
+
+ return result;
+}
+
} // namespace vector_math
} // namespace media
diff --git a/chromium/media/base/sinc_resampler.cc b/chromium/media/base/sinc_resampler.cc
index 5566f64ce86..82168dbc64a 100644
--- a/chromium/media/base/sinc_resampler.cc
+++ b/chromium/media/base/sinc_resampler.cc
@@ -153,7 +153,8 @@ SincResampler::SincResampler(double io_sample_rate_ratio,
input_buffer_(static_cast<float*>(
base::AlignedAlloc(sizeof(float) * input_buffer_size_, 16))),
r1_(input_buffer_.get()),
- r2_(input_buffer_.get() + kKernelSize / 2) {
+ r2_(input_buffer_.get() + kKernelSize / 2),
+ currently_resampling_(0) {
CHECK_GT(request_frames_, 0);
Flush();
CHECK_GT(block_size_, kKernelSize)
@@ -169,7 +170,10 @@ SincResampler::SincResampler(double io_sample_rate_ratio,
InitializeKernel();
}
-SincResampler::~SincResampler() {}
+SincResampler::~SincResampler() {
+ // TODO(dalecurtis): Remove debugging for http://crbug.com/295278
+ CHECK(base::AtomicRefCountIsZero(&currently_resampling_));
+}
void SincResampler::UpdateRegions(bool second_load) {
// Setup various region pointers in the buffer (see diagram above). If we're
@@ -252,6 +256,8 @@ void SincResampler::SetRatio(double io_sample_rate_ratio) {
}
void SincResampler::Resample(int frames, float* destination) {
+ base::AtomicRefCountInc(&currently_resampling_);
+
int remaining_frames = frames;
// Step (1) -- Prime the input buffer at the start of the input stream.
@@ -305,8 +311,10 @@ void SincResampler::Resample(int frames, float* destination) {
// Advance the virtual index.
virtual_source_idx_ += current_io_ratio;
- if (!--remaining_frames)
+ if (!--remaining_frames) {
+ CHECK(!base::AtomicRefCountDec(&currently_resampling_));
return;
+ }
}
// Wrap back around to the start.
@@ -323,6 +331,8 @@ void SincResampler::Resample(int frames, float* destination) {
// Step (5) -- Refresh the buffer with more input.
read_cb_.Run(request_frames_, r0_);
}
+
+ CHECK(!base::AtomicRefCountDec(&currently_resampling_));
}
#undef CONVOLVE_FUNC
@@ -332,6 +342,7 @@ int SincResampler::ChunkSize() const {
}
void SincResampler::Flush() {
+ CHECK(base::AtomicRefCountIsZero(&currently_resampling_));
virtual_source_idx_ = 0;
buffer_primed_ = false;
memset(input_buffer_.get(), 0,
diff --git a/chromium/media/base/sinc_resampler.h b/chromium/media/base/sinc_resampler.h
index facd1a106df..217077830cc 100644
--- a/chromium/media/base/sinc_resampler.h
+++ b/chromium/media/base/sinc_resampler.h
@@ -5,6 +5,7 @@
#ifndef MEDIA_BASE_SINC_RESAMPLER_H_
#define MEDIA_BASE_SINC_RESAMPLER_H_
+#include "base/atomic_ref_count.h"
#include "base/callback.h"
#include "base/gtest_prod_util.h"
#include "base/memory/aligned_memory.h"
@@ -74,7 +75,7 @@ class MEDIA_EXPORT SincResampler {
private:
FRIEND_TEST_ALL_PREFIXES(SincResamplerTest, Convolve);
- FRIEND_TEST_ALL_PREFIXES(SincResamplerTest, ConvolveBenchmark);
+ FRIEND_TEST_ALL_PREFIXES(SincResamplerPerfTest, Convolve);
void InitializeKernel();
void UpdateRegions(bool second_load);
@@ -135,6 +136,11 @@ class MEDIA_EXPORT SincResampler {
float* r3_;
float* r4_;
+ // Atomic ref count indicating when we're in the middle of resampling.
+ // Will be CHECK'd to find crashes...
+ // TODO(dalecurtis): Remove debug helpers for http://crbug.com/295278
+ base::AtomicRefCount currently_resampling_;
+
DISALLOW_COPY_AND_ASSIGN(SincResampler);
};
diff --git a/chromium/media/base/sinc_resampler_perftest.cc b/chromium/media/base/sinc_resampler_perftest.cc
new file mode 100644
index 00000000000..21c6ec325c9
--- /dev/null
+++ b/chromium/media/base/sinc_resampler_perftest.cc
@@ -0,0 +1,76 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/cpu.h"
+#include "base/time/time.h"
+#include "media/base/sinc_resampler.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/perf/perf_test.h"
+
+namespace media {
+
+static const int kBenchmarkIterations = 50000000;
+
+static const double kSampleRateRatio = 192000.0 / 44100.0;
+static const double kKernelInterpolationFactor = 0.5;
+
+// Helper function to provide no input to SincResampler's Convolve benchmark.
+static void DoNothing(int frames, float* destination) {}
+
+// Define platform independent function name for Convolve* tests.
+#if defined(ARCH_CPU_X86_FAMILY)
+#define CONVOLVE_FUNC Convolve_SSE
+#elif defined(ARCH_CPU_ARM_FAMILY) && defined(USE_NEON)
+#define CONVOLVE_FUNC Convolve_NEON
+#endif
+
+static void RunConvolveBenchmark(
+ SincResampler* resampler,
+ float (*convolve_fn)(const float*, const float*, const float*, double),
+ bool aligned,
+ const std::string& trace_name) {
+ base::TimeTicks start = base::TimeTicks::HighResNow();
+ for (int i = 0; i < kBenchmarkIterations; ++i) {
+ convolve_fn(resampler->get_kernel_for_testing() + (aligned ? 0 : 1),
+ resampler->get_kernel_for_testing(),
+ resampler->get_kernel_for_testing(),
+ kKernelInterpolationFactor);
+ }
+ double total_time_milliseconds =
+ (base::TimeTicks::HighResNow() - start).InMillisecondsF();
+ perf_test::PrintResult("sinc_resampler_convolve",
+ "",
+ trace_name,
+ kBenchmarkIterations / total_time_milliseconds,
+ "runs/ms",
+ true);
+}
+
+// Benchmark for the various Convolve() methods. Make sure to build with
+// branding=Chrome so that DCHECKs are compiled out when benchmarking.
+TEST(SincResamplerPerfTest, Convolve) {
+ SincResampler resampler(kSampleRateRatio,
+ SincResampler::kDefaultRequestSize,
+ base::Bind(&DoNothing));
+
+ RunConvolveBenchmark(
+ &resampler, SincResampler::Convolve_C, true, "unoptimized_aligned");
+
+#if defined(CONVOLVE_FUNC)
+#if defined(ARCH_CPU_X86_FAMILY)
+ ASSERT_TRUE(base::CPU().has_sse());
+#endif
+ RunConvolveBenchmark(
+ &resampler, SincResampler::CONVOLVE_FUNC, true, "optimized_aligned");
+ RunConvolveBenchmark(
+ &resampler, SincResampler::CONVOLVE_FUNC, false, "optimized_unaligned");
+#endif
+}
+
+#undef CONVOLVE_FUNC
+
+} // namespace media
diff --git a/chromium/media/base/sinc_resampler_unittest.cc b/chromium/media/base/sinc_resampler_unittest.cc
index 8b89a5d3808..3b460a39c39 100644
--- a/chromium/media/base/sinc_resampler_unittest.cc
+++ b/chromium/media/base/sinc_resampler_unittest.cc
@@ -9,11 +9,8 @@
#include "base/bind.h"
#include "base/bind_helpers.h"
-#include "base/command_line.h"
#include "base/cpu.h"
-#include "base/logging.h"
#include "base/strings/string_number_conversions.h"
-#include "base/strings/stringize_macros.h"
#include "base/time/time.h"
#include "build/build_config.h"
#include "media/base/sinc_resampler.h"
@@ -25,10 +22,6 @@ using testing::_;
namespace media {
static const double kSampleRateRatio = 192000.0 / 44100.0;
-static const double kKernelInterpolationFactor = 0.5;
-
-// Command line switch for runtime adjustment of ConvolveBenchmark iterations.
-static const char kConvolveIterations[] = "convolve-iterations";
// Helper class to ensure ChunkedResample() functions properly.
class MockSource {
@@ -125,6 +118,8 @@ TEST(SincResamplerTest, DISABLED_SetRatioBench) {
// this test if other optimized methods exist, otherwise the default Convolve()
// will be tested by the parameterized SincResampler tests below.
#if defined(CONVOLVE_FUNC)
+static const double kKernelInterpolationFactor = 0.5;
+
TEST(SincResamplerTest, Convolve) {
#if defined(ARCH_CPU_X86_FAMILY)
ASSERT_TRUE(base::CPU().has_sse());
@@ -161,74 +156,6 @@ TEST(SincResamplerTest, Convolve) {
}
#endif
-// Benchmark for the various Convolve() methods. Make sure to build with
-// branding=Chrome so that DCHECKs are compiled out when benchmarking. Original
-// benchmarks were run with --convolve-iterations=50000000.
-TEST(SincResamplerTest, ConvolveBenchmark) {
- // Initialize a dummy resampler.
- MockSource mock_source;
- SincResampler resampler(
- kSampleRateRatio, SincResampler::kDefaultRequestSize,
- base::Bind(&MockSource::ProvideInput, base::Unretained(&mock_source)));
-
- // Retrieve benchmark iterations from command line.
- int convolve_iterations = 10;
- std::string iterations(CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
- kConvolveIterations));
- if (!iterations.empty())
- base::StringToInt(iterations, &convolve_iterations);
-
- printf("Benchmarking %d iterations:\n", convolve_iterations);
-
- // Benchmark Convolve_C().
- base::TimeTicks start = base::TimeTicks::HighResNow();
- for (int i = 0; i < convolve_iterations; ++i) {
- resampler.Convolve_C(
- resampler.kernel_storage_.get(), resampler.kernel_storage_.get(),
- resampler.kernel_storage_.get(), kKernelInterpolationFactor);
- }
- double total_time_c_ms =
- (base::TimeTicks::HighResNow() - start).InMillisecondsF();
- printf("Convolve_C took %.2fms.\n", total_time_c_ms);
-
-#if defined(CONVOLVE_FUNC)
-#if defined(ARCH_CPU_X86_FAMILY)
- ASSERT_TRUE(base::CPU().has_sse());
-#endif
-
- // Benchmark with unaligned input pointer.
- start = base::TimeTicks::HighResNow();
- for (int j = 0; j < convolve_iterations; ++j) {
- resampler.CONVOLVE_FUNC(
- resampler.kernel_storage_.get() + 1, resampler.kernel_storage_.get(),
- resampler.kernel_storage_.get(), kKernelInterpolationFactor);
- }
- double total_time_optimized_unaligned_ms =
- (base::TimeTicks::HighResNow() - start).InMillisecondsF();
- printf(STRINGIZE(CONVOLVE_FUNC) " (unaligned) took %.2fms; which is %.2fx "
- "faster than Convolve_C.\n", total_time_optimized_unaligned_ms,
- total_time_c_ms / total_time_optimized_unaligned_ms);
-
- // Benchmark with aligned input pointer.
- start = base::TimeTicks::HighResNow();
- for (int j = 0; j < convolve_iterations; ++j) {
- resampler.CONVOLVE_FUNC(
- resampler.kernel_storage_.get(), resampler.kernel_storage_.get(),
- resampler.kernel_storage_.get(), kKernelInterpolationFactor);
- }
- double total_time_optimized_aligned_ms =
- (base::TimeTicks::HighResNow() - start).InMillisecondsF();
- printf(STRINGIZE(CONVOLVE_FUNC) " (aligned) took %.2fms; which is %.2fx "
- "faster than Convolve_C and %.2fx faster than "
- STRINGIZE(CONVOLVE_FUNC) " (unaligned).\n",
- total_time_optimized_aligned_ms,
- total_time_c_ms / total_time_optimized_aligned_ms,
- total_time_optimized_unaligned_ms / total_time_optimized_aligned_ms);
-#endif
-}
-
-#undef CONVOLVE_FUNC
-
// Fake audio source for testing the resampler. Generates a sinusoidal linear
// chirp (http://en.wikipedia.org/wiki/Chirp) which can be tuned to stress the
// resampler for the specific sample rate conversion being used.
diff --git a/chromium/media/base/stream_parser.h b/chromium/media/base/stream_parser.h
index 33a336def8b..101ce4eee0e 100644
--- a/chromium/media/base/stream_parser.h
+++ b/chromium/media/base/stream_parser.h
@@ -6,6 +6,7 @@
#define MEDIA_BASE_STREAM_PARSER_H_
#include <deque>
+#include <map>
#include <string>
#include "base/callback_forward.h"
@@ -14,18 +15,19 @@
#include "base/time/time.h"
#include "media/base/media_export.h"
#include "media/base/media_log.h"
-#include "media/base/text_track.h"
namespace media {
class AudioDecoderConfig;
class StreamParserBuffer;
+class TextTrackConfig;
class VideoDecoderConfig;
// Abstract interface for parsing media byte streams.
class MEDIA_EXPORT StreamParser {
public:
typedef std::deque<scoped_refptr<StreamParserBuffer> > BufferQueue;
+ typedef std::map<int, TextTrackConfig> TextTrackConfigMap;
StreamParser();
virtual ~StreamParser();
@@ -43,11 +45,14 @@ class MEDIA_EXPORT StreamParser {
// then it means that there isn't an audio stream.
// Second parameter - The new video configuration. If the config is not valid
// then it means that there isn't a video stream.
+ // Third parameter - The new text tracks configuration. If the map is empty,
+ // then no text tracks were parsed from the stream.
// Return value - True if the new configurations are accepted.
// False if the new configurations are not supported
// and indicates that a parsing error should be signalled.
typedef base::Callback<bool(const AudioDecoderConfig&,
- const VideoDecoderConfig&)> NewConfigCB;
+ const VideoDecoderConfig&,
+ const TextTrackConfigMap&)> NewConfigCB;
// New stream buffers have been parsed.
// First parameter - A queue of newly parsed audio buffers.
@@ -59,12 +64,13 @@ class MEDIA_EXPORT StreamParser {
const BufferQueue&)> NewBuffersCB;
// New stream buffers of inband text have been parsed.
- // First parameter - The text track to which these cues will be added.
+ // First parameter - The id of the text track to which these cues will
+ // be added.
// Second parameter - A queue of newly parsed buffers.
// Return value - True indicates that the buffers are accepted.
// False if something was wrong with the buffers and a parsing
// error should be signalled.
- typedef base::Callback<bool(TextTrack*, const BufferQueue&)> NewTextBuffersCB;
+ typedef base::Callback<bool(int, const BufferQueue&)> NewTextBuffersCB;
// Signals the beginning of a new media segment.
typedef base::Callback<void()> NewMediaSegmentCB;
@@ -85,7 +91,6 @@ class MEDIA_EXPORT StreamParser {
const NewBuffersCB& new_buffers_cb,
const NewTextBuffersCB& text_cb,
const NeedKeyCB& need_key_cb,
- const AddTextTrackCB& add_text_track_cb,
const NewMediaSegmentCB& new_segment_cb,
const base::Closure& end_of_segment_cb,
const LogCB& log_cb) = 0;
diff --git a/chromium/media/base/test_data_util.cc b/chromium/media/base/test_data_util.cc
index 55e82fc8635..386617e006b 100644
--- a/chromium/media/base/test_data_util.cc
+++ b/chromium/media/base/test_data_util.cc
@@ -30,14 +30,14 @@ scoped_refptr<DecoderBuffer> ReadTestDataFile(const std::string& name) {
.AppendASCII(name);
int64 tmp = 0;
- CHECK(file_util::GetFileSize(file_path, &tmp))
+ CHECK(base::GetFileSize(file_path, &tmp))
<< "Failed to get file size for '" << name << "'";
int file_size = static_cast<int>(tmp);
scoped_refptr<DecoderBuffer> buffer(new DecoderBuffer(file_size));
CHECK_EQ(file_size,
- file_util::ReadFile(
+ base::ReadFile(
file_path, reinterpret_cast<char*>(buffer->writable_data()),
file_size)) << "Failed to read '" << name << "'";
diff --git a/chromium/media/base/text_cue.cc b/chromium/media/base/text_cue.cc
new file mode 100644
index 00000000000..3d8a8926642
--- /dev/null
+++ b/chromium/media/base/text_cue.cc
@@ -0,0 +1,23 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/text_cue.h"
+
+namespace media {
+
+TextCue::TextCue(const base::TimeDelta& timestamp,
+ const base::TimeDelta& duration,
+ const std::string& id,
+ const std::string& settings,
+ const std::string& text)
+ : timestamp_(timestamp),
+ duration_(duration),
+ id_(id),
+ settings_(settings),
+ text_(text) {
+}
+
+TextCue::~TextCue() {}
+
+} // namespace media
diff --git a/chromium/media/base/text_cue.h b/chromium/media/base/text_cue.h
new file mode 100644
index 00000000000..2afae8d5a4f
--- /dev/null
+++ b/chromium/media/base/text_cue.h
@@ -0,0 +1,48 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_TEXT_CUE_H_
+#define MEDIA_BASE_TEXT_CUE_H_
+
+#include <string>
+
+#include "base/memory/ref_counted.h"
+#include "base/time/time.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+// A text buffer to carry the components of a text track cue.
+class MEDIA_EXPORT TextCue
+ : public base::RefCountedThreadSafe<TextCue> {
+ public:
+ TextCue(const base::TimeDelta& timestamp,
+ const base::TimeDelta& duration,
+ const std::string& id,
+ const std::string& settings,
+ const std::string& text);
+
+ // Access to constructor parameters.
+ base::TimeDelta timestamp() const { return timestamp_; }
+ base::TimeDelta duration() const { return duration_; }
+ const std::string& id() const { return id_; }
+ const std::string& settings() const { return settings_; }
+ const std::string& text() const { return text_; }
+
+ private:
+ friend class base::RefCountedThreadSafe<TextCue>;
+ ~TextCue();
+
+ base::TimeDelta timestamp_;
+ base::TimeDelta duration_;
+ std::string id_;
+ std::string settings_;
+ std::string text_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(TextCue);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_TEXT_CUE_H_
diff --git a/chromium/media/base/text_renderer.cc b/chromium/media/base/text_renderer.cc
new file mode 100644
index 00000000000..91f9a33618d
--- /dev/null
+++ b/chromium/media/base/text_renderer.cc
@@ -0,0 +1,369 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/text_renderer.h"
+
+#include "base/bind.h"
+#include "base/callback_helpers.h"
+#include "base/logging.h"
+#include "base/message_loop/message_loop_proxy.h"
+#include "base/stl_util.h"
+#include "media/base/bind_to_loop.h"
+#include "media/base/decoder_buffer.h"
+#include "media/base/demuxer.h"
+#include "media/base/demuxer_stream.h"
+#include "media/base/text_cue.h"
+
+namespace media {
+
+TextRenderer::TextRenderer(
+ const scoped_refptr<base::MessageLoopProxy>& message_loop,
+ const AddTextTrackCB& add_text_track_cb)
+ : message_loop_(message_loop),
+ weak_factory_(this),
+ add_text_track_cb_(add_text_track_cb),
+ state_(kUninitialized),
+ pending_read_count_(0) {
+}
+
+TextRenderer::~TextRenderer() {
+ DCHECK(state_ == kUninitialized ||
+ state_ == kStopped) << "state_ " << state_;
+ DCHECK_EQ(pending_read_count_, 0);
+ STLDeleteValues(&text_track_state_map_);
+}
+
+void TextRenderer::Initialize(const base::Closure& ended_cb) {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(!ended_cb.is_null());
+ DCHECK_EQ(kUninitialized, state_) << "state_ " << state_;
+ DCHECK(text_track_state_map_.empty());
+ DCHECK_EQ(pending_read_count_, 0);
+ DCHECK(pending_eos_set_.empty());
+ DCHECK(ended_cb_.is_null());
+
+ weak_this_ = weak_factory_.GetWeakPtr();
+ ended_cb_ = ended_cb;
+ state_ = kPaused;
+}
+
+void TextRenderer::Play(const base::Closure& callback) {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK_EQ(state_, kPaused) << "state_ " << state_;
+
+ for (TextTrackStateMap::iterator itr = text_track_state_map_.begin();
+ itr != text_track_state_map_.end(); ++itr) {
+ TextTrackState* state = itr->second;
+ if (state->read_state == TextTrackState::kReadPending) {
+ DCHECK_GT(pending_read_count_, 0);
+ continue;
+ }
+
+ Read(state, itr->first);
+ }
+
+ state_ = kPlaying;
+ callback.Run();
+}
+
+void TextRenderer::Pause(const base::Closure& callback) {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(state_ == kPlaying || state_ == kEnded) << "state_ " << state_;
+ DCHECK_GE(pending_read_count_, 0);
+ pause_cb_ = callback;
+
+ if (pending_read_count_ == 0) {
+ state_ = kPaused;
+ base::ResetAndReturn(&pause_cb_).Run();
+ return;
+ }
+
+ state_ = kPausePending;
+}
+
+void TextRenderer::Flush(const base::Closure& callback) {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK_EQ(pending_read_count_, 0);
+ DCHECK(state_ == kPaused) << "state_ " << state_;
+
+ for (TextTrackStateMap::iterator itr = text_track_state_map_.begin();
+ itr != text_track_state_map_.end(); ++itr) {
+ pending_eos_set_.insert(itr->first);
+ }
+ DCHECK_EQ(pending_eos_set_.size(), text_track_state_map_.size());
+ callback.Run();
+}
+
+void TextRenderer::Stop(const base::Closure& cb) {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(!cb.is_null());
+ DCHECK(state_ == kPlaying ||
+ state_ == kPausePending ||
+ state_ == kPaused ||
+ state_ == kEnded) << "state_ " << state_;
+ DCHECK_GE(pending_read_count_, 0);
+
+ stop_cb_ = cb;
+
+ if (pending_read_count_ == 0) {
+ state_ = kStopped;
+ base::ResetAndReturn(&stop_cb_).Run();
+ return;
+ }
+
+ state_ = kStopPending;
+}
+
+void TextRenderer::AddTextStream(DemuxerStream* text_stream,
+ const TextTrackConfig& config) {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(state_ != kUninitialized) << "state_ " << state_;
+ DCHECK_NE(state_, kStopPending);
+ DCHECK_NE(state_, kStopped);
+ DCHECK(text_track_state_map_.find(text_stream) ==
+ text_track_state_map_.end());
+ DCHECK(pending_eos_set_.find(text_stream) ==
+ pending_eos_set_.end());
+
+ media::AddTextTrackDoneCB done_cb =
+ media::BindToLoop(message_loop_,
+ base::Bind(&TextRenderer::OnAddTextTrackDone,
+ weak_this_,
+ text_stream));
+
+ add_text_track_cb_.Run(config, done_cb);
+}
+
+void TextRenderer::RemoveTextStream(DemuxerStream* text_stream) {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+
+ TextTrackStateMap::iterator itr = text_track_state_map_.find(text_stream);
+ DCHECK(itr != text_track_state_map_.end());
+
+ TextTrackState* state = itr->second;
+ DCHECK_EQ(state->read_state, TextTrackState::kReadIdle);
+ delete state;
+ text_track_state_map_.erase(itr);
+
+ pending_eos_set_.erase(text_stream);
+}
+
+bool TextRenderer::HasTracks() const {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+ return !text_track_state_map_.empty();
+}
+
+void TextRenderer::BufferReady(
+ DemuxerStream* stream,
+ DemuxerStream::Status status,
+ const scoped_refptr<DecoderBuffer>& input) {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK_NE(status, DemuxerStream::kConfigChanged);
+
+ if (status == DemuxerStream::kAborted) {
+ DCHECK(!input);
+ DCHECK_GT(pending_read_count_, 0);
+ DCHECK(pending_eos_set_.find(stream) != pending_eos_set_.end());
+
+ TextTrackStateMap::iterator itr = text_track_state_map_.find(stream);
+ DCHECK(itr != text_track_state_map_.end());
+
+ TextTrackState* state = itr->second;
+ DCHECK_EQ(state->read_state, TextTrackState::kReadPending);
+
+ --pending_read_count_;
+ state->read_state = TextTrackState::kReadIdle;
+
+ switch (state_) {
+ case kPlaying:
+ return;
+
+ case kPausePending:
+ if (pending_read_count_ == 0) {
+ state_ = kPaused;
+ base::ResetAndReturn(&pause_cb_).Run();
+ }
+
+ return;
+
+ case kStopPending:
+ if (pending_read_count_ == 0) {
+ state_ = kStopped;
+ base::ResetAndReturn(&stop_cb_).Run();
+ }
+
+ return;
+
+ case kPaused:
+ case kStopped:
+ case kUninitialized:
+ case kEnded:
+ NOTREACHED();
+ return;
+ }
+
+ NOTREACHED();
+ return;
+ }
+
+ if (input->end_of_stream()) {
+ CueReady(stream, NULL);
+ return;
+ }
+
+ DCHECK_EQ(status, DemuxerStream::kOk);
+ DCHECK_GE(input->side_data_size(), 2);
+
+ // The side data contains both the cue id and cue settings,
+ // each terminated with a NUL.
+ const char* id_ptr = reinterpret_cast<const char*>(input->side_data());
+ size_t id_len = strlen(id_ptr);
+ std::string id(id_ptr, id_len);
+
+ const char* settings_ptr = id_ptr + id_len + 1;
+ size_t settings_len = strlen(settings_ptr);
+ std::string settings(settings_ptr, settings_len);
+
+ // The cue payload is stored in the data-part of the input buffer.
+ std::string text(input->data(), input->data() + input->data_size());
+
+ scoped_refptr<TextCue> text_cue(
+ new TextCue(input->timestamp(),
+ input->duration(),
+ id,
+ settings,
+ text));
+
+ CueReady(stream, text_cue);
+}
+
+void TextRenderer::CueReady(
+ DemuxerStream* text_stream,
+ const scoped_refptr<TextCue>& text_cue) {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(state_ != kUninitialized &&
+ state_ != kStopped) << "state_ " << state_;
+ DCHECK_GT(pending_read_count_, 0);
+ DCHECK(pending_eos_set_.find(text_stream) != pending_eos_set_.end());
+
+ TextTrackStateMap::iterator itr = text_track_state_map_.find(text_stream);
+ DCHECK(itr != text_track_state_map_.end());
+
+ TextTrackState* state = itr->second;
+ DCHECK_EQ(state->read_state, TextTrackState::kReadPending);
+ DCHECK(state->text_track);
+
+ --pending_read_count_;
+ state->read_state = TextTrackState::kReadIdle;
+
+ switch (state_) {
+ case kPlaying: {
+ if (text_cue)
+ break;
+
+ const size_t count = pending_eos_set_.erase(text_stream);
+ DCHECK_EQ(count, 1U);
+
+ if (pending_eos_set_.empty()) {
+ DCHECK_EQ(pending_read_count_, 0);
+ state_ = kEnded;
+ ended_cb_.Run();
+ return;
+ }
+
+ DCHECK_GT(pending_read_count_, 0);
+ return;
+ }
+ case kPausePending: {
+ if (text_cue)
+ break;
+
+ const size_t count = pending_eos_set_.erase(text_stream);
+ DCHECK_EQ(count, 1U);
+
+ if (pending_read_count_ > 0) {
+ DCHECK(!pending_eos_set_.empty());
+ return;
+ }
+
+ state_ = kPaused;
+ base::ResetAndReturn(&pause_cb_).Run();
+
+ return;
+ }
+ case kStopPending:
+ if (pending_read_count_ == 0) {
+ state_ = kStopped;
+ base::ResetAndReturn(&stop_cb_).Run();
+ }
+
+ return;
+
+ case kPaused:
+ case kStopped:
+ case kUninitialized:
+ case kEnded:
+ NOTREACHED();
+ return;
+ }
+
+ base::TimeDelta start = text_cue->timestamp();
+ base::TimeDelta end = start + text_cue->duration();
+
+ state->text_track->addWebVTTCue(start, end,
+ text_cue->id(),
+ text_cue->text(),
+ text_cue->settings());
+
+ if (state_ == kPlaying) {
+ Read(state, text_stream);
+ return;
+ }
+
+ if (pending_read_count_ == 0) {
+ DCHECK_EQ(state_, kPausePending) << "state_ " << state_;
+ state_ = kPaused;
+ base::ResetAndReturn(&pause_cb_).Run();
+ }
+}
+
+void TextRenderer::OnAddTextTrackDone(DemuxerStream* text_stream,
+ scoped_ptr<TextTrack> text_track) {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(state_ != kUninitialized &&
+ state_ != kStopped &&
+ state_ != kStopPending) << "state_ " << state_;
+ DCHECK(text_stream);
+ DCHECK(text_track);
+
+ scoped_ptr<TextTrackState> state(new TextTrackState(text_track.Pass()));
+ text_track_state_map_[text_stream] = state.release();
+ pending_eos_set_.insert(text_stream);
+
+ if (state_ == kPlaying)
+ Read(text_track_state_map_[text_stream], text_stream);
+}
+
+void TextRenderer::Read(
+ TextTrackState* state,
+ DemuxerStream* text_stream) {
+ DCHECK_NE(state->read_state, TextTrackState::kReadPending);
+
+ state->read_state = TextTrackState::kReadPending;
+ ++pending_read_count_;
+
+ text_stream->Read(base::Bind(&TextRenderer::BufferReady,
+ weak_this_,
+ text_stream));
+}
+
+TextRenderer::TextTrackState::TextTrackState(scoped_ptr<TextTrack> tt)
+ : read_state(kReadIdle),
+ text_track(tt.Pass()) {
+}
+
+TextRenderer::TextTrackState::~TextTrackState() {
+}
+
+} // namespace media
diff --git a/chromium/media/base/text_renderer.h b/chromium/media/base/text_renderer.h
new file mode 100644
index 00000000000..532a1fa0376
--- /dev/null
+++ b/chromium/media/base/text_renderer.h
@@ -0,0 +1,145 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_TEXT_RENDERER_H_
+#define MEDIA_BASE_TEXT_RENDERER_H_
+
+#include <map>
+#include <set>
+
+#include "base/callback.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
+#include "media/base/demuxer_stream.h"
+#include "media/base/media_export.h"
+#include "media/base/pipeline_status.h"
+#include "media/base/text_track.h"
+
+namespace base {
+class MessageLoopProxy;
+}
+
+namespace media {
+
+class TextCue;
+class TextTrackConfig;
+
+// Receives decoder buffers from the upstream demuxer, decodes them to text
+// cues, and then passes them onto the TextTrack object associated with each
+// demuxer text stream.
+class MEDIA_EXPORT TextRenderer {
+ public:
+ // |message_loop| is the thread on which TextRenderer will execute.
+ //
+ // |add_text_track_cb| is called when the demuxer requests (via its host)
+ // that a new text track be created.
+ TextRenderer(const scoped_refptr<base::MessageLoopProxy>& message_loop,
+ const AddTextTrackCB& add_text_track_cb);
+ ~TextRenderer();
+
+ // |ended_cb| is executed when all of the text tracks have reached
+ // end of stream, following a play request.
+ void Initialize(const base::Closure& ended_cb);
+
+ // Start text track cue decoding and rendering, executing |callback| when
+ // playback is underway.
+ void Play(const base::Closure& callback);
+
+ // Temporarily suspend decoding and rendering, executing |callback| when
+ // playback has been suspended.
+ void Pause(const base::Closure& callback);
+
+ // Discard any text data, executing |callback| when completed.
+ void Flush(const base::Closure& callback);
+
+ // Stop all operations in preparation for being deleted, executing |callback|
+ // when complete.
+ void Stop(const base::Closure& callback);
+
+ // Add new |text_stream|, having the indicated |config|, to the text stream
+ // collection managed by this text renderer.
+ void AddTextStream(DemuxerStream* text_stream,
+ const TextTrackConfig& config);
+
+ // Remove |text_stream| from the text stream collection.
+ void RemoveTextStream(DemuxerStream* text_stream);
+
+ // Returns true if there are extant text tracks.
+ bool HasTracks() const;
+
+ private:
+ struct TextTrackState {
+ // To determine read progress.
+ enum ReadState {
+ kReadIdle,
+ kReadPending
+ };
+
+ explicit TextTrackState(scoped_ptr<TextTrack> text_track);
+ ~TextTrackState();
+
+ ReadState read_state;
+ scoped_ptr<TextTrack> text_track;
+ };
+
+ // Callback delivered by the demuxer |text_stream| when
+ // a read from the stream completes.
+ void BufferReady(DemuxerStream* text_stream,
+ DemuxerStream::Status status,
+ const scoped_refptr<DecoderBuffer>& input);
+
+ // Dispatches the decoded cue delivered on the demuxer's |text_stream|.
+ void CueReady(DemuxerStream* text_stream,
+ const scoped_refptr<TextCue>& text_cue);
+
+ // Dispatched when the AddTextTrackCB completes, after having created
+ // the TextTrack object associated with |text_stream|.
+ void OnAddTextTrackDone(DemuxerStream* text_stream,
+ scoped_ptr<TextTrack> text_track);
+
+ // Utility function to post a read request on |text_stream|.
+ void Read(TextTrackState* state, DemuxerStream* text_stream);
+
+ scoped_refptr<base::MessageLoopProxy> message_loop_;
+ base::WeakPtrFactory<TextRenderer> weak_factory_;
+ base::WeakPtr<TextRenderer> weak_this_;
+ const AddTextTrackCB add_text_track_cb_;
+
+ // Callbacks provided during Initialize().
+ base::Closure ended_cb_;
+
+ // Callback provided to Pause().
+ base::Closure pause_cb_;
+
+ // Callback provided to Stop().
+ base::Closure stop_cb_;
+
+ // Simple state tracking variable.
+ enum State {
+ kUninitialized,
+ kPausePending,
+ kPaused,
+ kPlaying,
+ kEnded,
+ kStopPending,
+ kStopped
+ };
+ State state_;
+
+ typedef std::map<DemuxerStream*, TextTrackState*> TextTrackStateMap;
+ TextTrackStateMap text_track_state_map_;
+
+ // Indicates how many read requests are in flight.
+ int pending_read_count_;
+
+ // Indicates which text streams have not delivered end-of-stream yet.
+ typedef std::set<DemuxerStream*> PendingEosSet;
+ PendingEosSet pending_eos_set_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(TextRenderer);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_TEXT_RENDERER_H_
diff --git a/chromium/media/base/text_renderer_unittest.cc b/chromium/media/base/text_renderer_unittest.cc
new file mode 100644
index 00000000000..77e8c471824
--- /dev/null
+++ b/chromium/media/base/text_renderer_unittest.cc
@@ -0,0 +1,1382 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <vector>
+
+#include "base/bind.h"
+#include "base/callback_helpers.h"
+#include "base/memory/scoped_vector.h"
+#include "base/message_loop/message_loop.h"
+#include "media/base/audio_decoder_config.h"
+#include "media/base/decoder_buffer.h"
+#include "media/base/demuxer_stream.h"
+#include "media/base/fake_text_track_stream.h"
+#include "media/base/text_renderer.h"
+#include "media/base/text_track_config.h"
+#include "media/base/video_decoder_config.h"
+#include "media/filters/webvtt_util.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using ::testing::Eq;
+using ::testing::Exactly;
+using ::testing::Invoke;
+using ::testing::_;
+
+namespace media {
+
+// Local implementation of the TextTrack interface.
+class FakeTextTrack : public TextTrack {
+ public:
+ FakeTextTrack(const base::Closure& destroy_cb,
+ const TextTrackConfig& config)
+ : destroy_cb_(destroy_cb),
+ config_(config) {
+ }
+ virtual ~FakeTextTrack() {
+ destroy_cb_.Run();
+ }
+
+ MOCK_METHOD5(addWebVTTCue, void(const base::TimeDelta& start,
+ const base::TimeDelta& end,
+ const std::string& id,
+ const std::string& content,
+ const std::string& settings));
+
+ const base::Closure destroy_cb_;
+ const TextTrackConfig config_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(FakeTextTrack);
+};
+
+class TextRendererTest : public testing::Test {
+ public:
+ TextRendererTest() {}
+
+ void CreateTextRenderer() {
+ DCHECK(!text_renderer_);
+
+ text_renderer_.reset(
+ new TextRenderer(message_loop_.message_loop_proxy(),
+ base::Bind(&TextRendererTest::OnAddTextTrack,
+ base::Unretained(this))));
+ text_renderer_->Initialize(base::Bind(&TextRendererTest::OnEnd,
+ base::Unretained(this)));
+ }
+
+ void DestroyTextRenderer() {
+ EXPECT_CALL(*this, OnStop());
+ text_renderer_->Stop(base::Bind(&TextRendererTest::OnStop,
+ base::Unretained(this)));
+ message_loop_.RunUntilIdle();
+
+ text_renderer_.reset();
+ text_track_streams_.clear();
+ }
+
+ void AddTextTrack(TextKind kind,
+ const std::string& name,
+ const std::string& language,
+ bool expect_read) {
+ const size_t idx = text_track_streams_.size();
+ text_track_streams_.push_back(new FakeTextTrackStream);
+
+ if (expect_read)
+ ExpectRead(idx);
+
+ const TextTrackConfig config(kind, name, language, std::string());
+ text_renderer_->AddTextStream(text_track_streams_.back(), config);
+ message_loop_.RunUntilIdle();
+
+ EXPECT_EQ(text_tracks_.size(), text_track_streams_.size());
+ FakeTextTrack* const text_track = text_tracks_.back();
+ EXPECT_TRUE(text_track);
+ EXPECT_TRUE(text_track->config_.Matches(config));
+ }
+
+ void OnAddTextTrack(const TextTrackConfig& config,
+ const AddTextTrackDoneCB& done_cb) {
+ base::Closure destroy_cb =
+ base::Bind(&TextRendererTest::OnDestroyTextTrack,
+ base::Unretained(this),
+ text_tracks_.size());
+ // Text track objects are owned by the text renderer, but we cache them
+ // here so we can inspect them. They get removed from our cache when the
+ // text renderer deallocates them.
+ text_tracks_.push_back(new FakeTextTrack(destroy_cb, config));
+ scoped_ptr<TextTrack> text_track(text_tracks_.back());
+ done_cb.Run(text_track.Pass());
+ }
+
+ void RemoveTextTrack(unsigned idx) {
+ FakeTextTrackStream* const stream = text_track_streams_[idx];
+ text_renderer_->RemoveTextStream(stream);
+ EXPECT_FALSE(text_tracks_[idx]);
+ }
+
+ void SatisfyPendingReads(const base::TimeDelta& start,
+ const base::TimeDelta& duration,
+ const std::string& id,
+ const std::string& content,
+ const std::string& settings) {
+ for (TextTrackStreams::iterator itr = text_track_streams_.begin();
+ itr != text_track_streams_.end(); ++itr) {
+ (*itr)->SatisfyPendingRead(start, duration, id, content, settings);
+ }
+ }
+
+ void AbortPendingRead(unsigned idx) {
+ FakeTextTrackStream* const stream = text_track_streams_[idx];
+ stream->AbortPendingRead();
+ message_loop_.RunUntilIdle();
+ }
+
+ void AbortPendingReads() {
+ for (size_t idx = 0; idx < text_track_streams_.size(); ++idx) {
+ AbortPendingRead(idx);
+ }
+ }
+
+ void SendEosNotification(unsigned idx) {
+ FakeTextTrackStream* const stream = text_track_streams_[idx];
+ stream->SendEosNotification();
+ message_loop_.RunUntilIdle();
+ }
+
+ void SendEosNotifications() {
+ for (size_t idx = 0; idx < text_track_streams_.size(); ++idx) {
+ SendEosNotification(idx);
+ }
+ }
+
+ void SendCue(unsigned idx, bool expect_cue) {
+ FakeTextTrackStream* const text_stream = text_track_streams_[idx];
+
+ const base::TimeDelta start;
+ const base::TimeDelta duration = base::TimeDelta::FromSeconds(42);
+ const std::string id = "id";
+ const std::string content = "subtitle";
+ const std::string settings;
+
+ if (expect_cue) {
+ FakeTextTrack* const text_track = text_tracks_[idx];
+ EXPECT_CALL(*text_track, addWebVTTCue(start,
+ start + duration,
+ id,
+ content,
+ settings));
+ }
+
+ text_stream->SatisfyPendingRead(start, duration, id, content, settings);
+ message_loop_.RunUntilIdle();
+ }
+
+ void SendCues(bool expect_cue) {
+ for (size_t idx = 0; idx < text_track_streams_.size(); ++idx) {
+ SendCue(idx, expect_cue);
+ }
+ }
+
+ void OnDestroyTextTrack(unsigned idx) {
+ text_tracks_[idx] = NULL;
+ }
+
+ void Play() {
+ EXPECT_CALL(*this, OnPlay());
+ text_renderer_->Play(base::Bind(&TextRendererTest::OnPlay,
+ base::Unretained(this)));
+ message_loop_.RunUntilIdle();
+ }
+
+ void Pause() {
+ text_renderer_->Pause(base::Bind(&TextRendererTest::OnPause,
+ base::Unretained(this)));
+ message_loop_.RunUntilIdle();
+ }
+
+ void Flush() {
+ EXPECT_CALL(*this, OnFlush());
+ text_renderer_->Flush(base::Bind(&TextRendererTest::OnFlush,
+ base::Unretained(this)));
+ }
+
+ void Stop() {
+ text_renderer_->Stop(base::Bind(&TextRendererTest::OnStop,
+ base::Unretained(this)));
+ message_loop_.RunUntilIdle();
+ }
+
+ void ExpectRead(size_t idx) {
+ FakeTextTrackStream* const stream = text_track_streams_[idx];
+ EXPECT_CALL(*stream, OnRead());
+ }
+
+ MOCK_METHOD0(OnEnd, void());
+ MOCK_METHOD0(OnStop, void());
+ MOCK_METHOD0(OnPlay, void());
+ MOCK_METHOD0(OnPause, void());
+ MOCK_METHOD0(OnFlush, void());
+
+ scoped_ptr<TextRenderer> text_renderer_;
+ base::MessageLoop message_loop_;
+
+ typedef ScopedVector<FakeTextTrackStream> TextTrackStreams;
+ TextTrackStreams text_track_streams_;
+
+ typedef std::vector<FakeTextTrack*> TextTracks;
+ TextTracks text_tracks_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(TextRendererTest);
+};
+
+TEST_F(TextRendererTest, CreateTextRendererNoInit) {
+ text_renderer_.reset(
+ new TextRenderer(message_loop_.message_loop_proxy(),
+ base::Bind(&TextRendererTest::OnAddTextTrack,
+ base::Unretained(this))));
+ text_renderer_.reset();
+}
+
+TEST_F(TextRendererTest, TestStop) {
+ CreateTextRenderer();
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, AddTextTrackOnly_OneTrack) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "", "", false);
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, AddTextTrackOnly_TwoTracks) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "track 1", "", false);
+ AddTextTrack(kTextSubtitles, "track 2", "", false);
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayOnly) {
+ CreateTextRenderer();
+ Play();
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, AddTrackBeforePlay_OneTrack) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "", "", true);
+ Play();
+ AbortPendingReads();
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, AddTrackBeforePlay_TwoTracks) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ AbortPendingReads();
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, AddTrackAfterPlay_OneTrackAfter) {
+ CreateTextRenderer();
+ Play();
+ AddTextTrack(kTextSubtitles, "", "", true);
+ AbortPendingReads();
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, AddTrackAfterPlay_TwoTracksAfter) {
+ CreateTextRenderer();
+ Play();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ AbortPendingReads();
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, AddTrackAfterPlay_OneTrackBeforeOneTrackAfter) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ Play();
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ AbortPendingReads();
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayAddCue_OneTrack) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "", "", true);
+ Play();
+ ExpectRead(0);
+ SendCues(true);
+ AbortPendingReads();
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayAddCue_TwoTracks) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ ExpectRead(0);
+ ExpectRead(1);
+ SendCues(true);
+ AbortPendingReads();
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayEosOnly_OneTrack) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "", "", true);
+ Play();
+ EXPECT_CALL(*this, OnEnd());
+ SendEosNotifications();
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayEosOnly_TwoTracks) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ EXPECT_CALL(*this, OnEnd());
+ SendEosNotifications();
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayCueEos_OneTrack) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "", "", true);
+ Play();
+ ExpectRead(0);
+ SendCues(true);
+ EXPECT_CALL(*this, OnEnd());
+ SendEosNotifications();
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayCueEos_TwoTracks) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ ExpectRead(0);
+ ExpectRead(1);
+ SendCues(true);
+ EXPECT_CALL(*this, OnEnd());
+ SendEosNotifications();
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, StopPending_OneTrack) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "", "", true);
+ Play();
+ Stop();
+ EXPECT_CALL(*this, OnStop());
+ SendEosNotifications();
+ text_renderer_.reset();
+ text_track_streams_.clear();
+}
+
+TEST_F(TextRendererTest, StopPending_TwoTracks) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ Stop();
+ EXPECT_CALL(*this, OnStop());
+ SendEosNotifications();
+ text_renderer_.reset();
+ text_track_streams_.clear();
+}
+
+TEST_F(TextRendererTest, PlayPause_OneTrack) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "", "", true);
+ Play();
+ AbortPendingReads();
+ EXPECT_CALL(*this, OnPause());
+ Pause();
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayPause_TwoTracks) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ AbortPendingReads();
+ EXPECT_CALL(*this, OnPause());
+ Pause();
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayEosPausePending_OneTrack) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "", "", true);
+ Play();
+ Pause();
+ EXPECT_CALL(*this, OnPause());
+ SendEosNotifications();
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayEosPausePending_TwoTracks) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ Pause();
+ EXPECT_CALL(*this, OnPause());
+ SendEosNotifications();
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayCuePausePending_OneTrack) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "", "", true);
+ Play();
+ Pause();
+ EXPECT_CALL(*this, OnPause());
+ SendCues(true);
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayCuePausePending_TwoTracks) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ Pause();
+ EXPECT_CALL(*this, OnPause());
+ SendCues(true);
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayEosPause_OneTrack) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "", "", true);
+ Play();
+ EXPECT_CALL(*this, OnEnd());
+ SendEosNotifications();
+ EXPECT_CALL(*this, OnPause());
+ Pause();
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayEosPause_TwoTracks) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ EXPECT_CALL(*this, OnEnd());
+ SendEosNotifications();
+ EXPECT_CALL(*this, OnPause());
+ Pause();
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayEosPause_SplitEos) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ SendEosNotification(0);
+ EXPECT_CALL(*this, OnEnd());
+ SendEosNotification(1);
+ EXPECT_CALL(*this, OnPause());
+ Pause();
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayEosFlush_OneTrack) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "", "", true);
+ Play();
+ EXPECT_CALL(*this, OnEnd());
+ SendEosNotifications();
+ EXPECT_CALL(*this, OnPause());
+ Pause();
+ Flush();
+ ExpectRead(0);
+ Play();
+ EXPECT_CALL(*this, OnEnd());
+ SendEosNotifications();
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayEosFlush_TwoTracks) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ EXPECT_CALL(*this, OnEnd());
+ SendEosNotifications();
+ EXPECT_CALL(*this, OnPause());
+ Pause();
+ Flush();
+ ExpectRead(0);
+ ExpectRead(1);
+ Play();
+ EXPECT_CALL(*this, OnEnd());
+ SendEosNotifications();
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, AddTextTrackOnlyRemove_OneTrack) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "", "", false);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, AddTextTrackOnlyRemove_TwoTracks) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "track 1", "", false);
+ AddTextTrack(kTextSubtitles, "track 2", "", false);
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ RemoveTextTrack(1);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, AddTrackBeforePlayRemove_OneTrack) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "", "", true);
+ Play();
+ AbortPendingReads();
+ RemoveTextTrack(0);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, AddTrackBeforePlayRemove_TwoTracks) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ AbortPendingReads();
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ RemoveTextTrack(1);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, AddTrackBeforePlayRemove_SeparateCancel) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ AbortPendingRead(0);
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ AbortPendingRead(1);
+ RemoveTextTrack(1);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, AddTrackBeforePlayRemove_RemoveOneThenPlay) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", false);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ Play();
+ AbortPendingRead(1);
+ RemoveTextTrack(1);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, AddTrackBeforePlayRemove_RemoveTwoThenPlay) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", false);
+ AddTextTrack(kTextSubtitles, "2", "", false);
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ RemoveTextTrack(1);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ Play();
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, AddTrackAfterPlayRemove_OneTrack) {
+ CreateTextRenderer();
+ Play();
+ AddTextTrack(kTextSubtitles, "", "", true);
+ AbortPendingReads();
+ RemoveTextTrack(0);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, AddTrackAfterPlayRemove_TwoTracks) {
+ CreateTextRenderer();
+ Play();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ AbortPendingReads();
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ RemoveTextTrack(1);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, AddTrackAfterPlayRemove_SplitCancel) {
+ CreateTextRenderer();
+ Play();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ AbortPendingRead(0);
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ AbortPendingRead(1);
+ RemoveTextTrack(1);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, AddTrackAfterPlayRemove_SplitAdd) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ Play();
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ AbortPendingRead(0);
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ AbortPendingRead(1);
+ RemoveTextTrack(1);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayAddCueRemove_OneTrack) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "", "", true);
+ Play();
+ ExpectRead(0);
+ SendCues(true);
+ AbortPendingReads();
+ RemoveTextTrack(0);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayAddCueRemove_TwoTracks) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ ExpectRead(0);
+ ExpectRead(1);
+ SendCues(true);
+ AbortPendingRead(0);
+ AbortPendingRead(1);
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ RemoveTextTrack(1);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayEosOnlyRemove_OneTrack) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "", "", true);
+ Play();
+ EXPECT_CALL(*this, OnEnd());
+ SendEosNotifications();
+ RemoveTextTrack(0);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayEosOnlyRemove_TwoTracks) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ EXPECT_CALL(*this, OnEnd());
+ SendEosNotifications();
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ RemoveTextTrack(1);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayCueEosRemove_OneTrack) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "", "", true);
+ Play();
+ ExpectRead(0);
+ SendCues(true);
+ EXPECT_CALL(*this, OnEnd());
+ SendEosNotifications();
+ RemoveTextTrack(0);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayCueEosRemove_TwoTracks) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ ExpectRead(0);
+ ExpectRead(1);
+ SendCues(true);
+ EXPECT_CALL(*this, OnEnd());
+ SendEosNotifications();
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ RemoveTextTrack(1);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, TestStopPendingRemove_OneTrack) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "", "", true);
+ Play();
+ Stop();
+ EXPECT_CALL(*this, OnStop());
+ SendEosNotifications();
+ RemoveTextTrack(0);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ text_renderer_.reset();
+ text_track_streams_.clear();
+}
+
+TEST_F(TextRendererTest, TestStopPendingRemove_TwoTracks) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ Stop();
+ SendEosNotification(0);
+ EXPECT_CALL(*this, OnStop());
+ SendEosNotification(1);
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ RemoveTextTrack(1);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ text_renderer_.reset();
+ text_track_streams_.clear();
+}
+
+TEST_F(TextRendererTest, TestStopPendingRemove_RemoveThenSendEos) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ Stop();
+ SendEosNotification(0);
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ EXPECT_CALL(*this, OnStop());
+ SendEosNotification(1);
+ RemoveTextTrack(1);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ text_renderer_.reset();
+ text_track_streams_.clear();
+}
+
+TEST_F(TextRendererTest, PlayPauseRemove_PauseThenRemove) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "", "", true);
+ Play();
+ AbortPendingReads();
+ EXPECT_CALL(*this, OnPause());
+ Pause();
+ RemoveTextTrack(0);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayPauseRemove_RemoveThanPause) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "", "", true);
+ Play();
+ AbortPendingReads();
+ RemoveTextTrack(0);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ EXPECT_CALL(*this, OnPause());
+ Pause();
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayPause_PauseThenRemoveTwoTracks) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ AbortPendingReads();
+ EXPECT_CALL(*this, OnPause());
+ Pause();
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ RemoveTextTrack(1);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayPauseRemove_RemoveThenPauseTwoTracks) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ AbortPendingReads();
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ EXPECT_CALL(*this, OnPause());
+ Pause();
+ RemoveTextTrack(1);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayPauseRemove_SplitCancel) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ AbortPendingRead(0);
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ AbortPendingRead(1);
+ EXPECT_CALL(*this, OnPause());
+ Pause();
+ RemoveTextTrack(1);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+
+TEST_F(TextRendererTest, PlayPauseRemove_PauseLast) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ AbortPendingRead(0);
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ AbortPendingRead(1);
+ RemoveTextTrack(1);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ EXPECT_CALL(*this, OnPause());
+ Pause();
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayEosPausePendingRemove_OneTrack) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "", "", true);
+ Play();
+ Pause();
+ EXPECT_CALL(*this, OnPause());
+ SendEosNotifications();
+ RemoveTextTrack(0);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayEosPausePendingRemove_TwoTracks) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ Pause();
+ SendEosNotification(0);
+ EXPECT_CALL(*this, OnPause());
+ SendEosNotification(1);
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ RemoveTextTrack(1);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayEosPausePendingRemove_SplitEos) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ Pause();
+ SendEosNotification(0);
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ EXPECT_CALL(*this, OnPause());
+ SendEosNotification(1);
+ RemoveTextTrack(1);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayCuePausePendingRemove_OneTrack) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "", "", true);
+ Play();
+ Pause();
+ EXPECT_CALL(*this, OnPause());
+ SendCues(true);
+ RemoveTextTrack(0);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayCuePausePendingRemove_TwoTracks) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ Pause();
+ SendCue(0, true);
+ EXPECT_CALL(*this, OnPause());
+ SendCue(1, true);
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ RemoveTextTrack(1);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayCuePausePendingRemove_SplitSendCue) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ Pause();
+ SendCue(0, true);
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ EXPECT_CALL(*this, OnPause());
+ SendCue(1, true);
+ RemoveTextTrack(1);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayEosPauseRemove_PauseThenRemove) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "", "", true);
+ Play();
+ EXPECT_CALL(*this, OnEnd());
+ SendEosNotifications();
+ EXPECT_CALL(*this, OnPause());
+ Pause();
+ RemoveTextTrack(0);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayEosPauseRemove_RemoveThenPause) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "", "", true);
+ Play();
+ EXPECT_CALL(*this, OnEnd());
+ SendEosNotifications();
+ RemoveTextTrack(0);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ EXPECT_CALL(*this, OnPause());
+ Pause();
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayEosPause_PauseThenRemoveTwoTracks) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ SendEosNotification(0);
+ EXPECT_CALL(*this, OnEnd());
+ SendEosNotification(1);
+ EXPECT_CALL(*this, OnPause());
+ Pause();
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ RemoveTextTrack(1);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayEosPause_RemovePauseRemove) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ SendEosNotification(0);
+ EXPECT_CALL(*this, OnEnd());
+ SendEosNotification(1);
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ EXPECT_CALL(*this, OnPause());
+ Pause();
+ RemoveTextTrack(1);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayEosPause_EosThenPause) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ SendEosNotification(0);
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ EXPECT_CALL(*this, OnEnd());
+ SendEosNotification(1);
+ EXPECT_CALL(*this, OnPause());
+ Pause();
+ RemoveTextTrack(1);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayEosPause_PauseLast) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ SendEosNotification(0);
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ EXPECT_CALL(*this, OnEnd());
+ SendEosNotification(1);
+ RemoveTextTrack(1);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ EXPECT_CALL(*this, OnPause());
+ Pause();
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayEosPause_EosPauseRemove) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ SendEosNotification(0);
+ EXPECT_CALL(*this, OnEnd());
+ SendEosNotification(1);
+ EXPECT_CALL(*this, OnPause());
+ Pause();
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ RemoveTextTrack(1);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayEosPause_EosRemovePause) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ SendEosNotification(0);
+ EXPECT_CALL(*this, OnEnd());
+ SendEosNotification(1);
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ EXPECT_CALL(*this, OnPause());
+ Pause();
+ RemoveTextTrack(1);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayEosPause_EosRemoveEosPause) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ SendEosNotification(0);
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ EXPECT_CALL(*this, OnEnd());
+ SendEosNotification(1);
+ EXPECT_CALL(*this, OnPause());
+ Pause();
+ RemoveTextTrack(1);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayEosPause_EosRemoveEosRemovePause) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ SendEosNotification(0);
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ EXPECT_CALL(*this, OnEnd());
+ SendEosNotification(1);
+ RemoveTextTrack(1);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ EXPECT_CALL(*this, OnPause());
+ Pause();
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayEosFlushRemove_OneTrack) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "", "", true);
+ Play();
+ EXPECT_CALL(*this, OnEnd());
+ SendEosNotifications();
+ EXPECT_CALL(*this, OnPause());
+ Pause();
+ Flush();
+ ExpectRead(0);
+ Play();
+ EXPECT_CALL(*this, OnEnd());
+ SendEosNotifications();
+ RemoveTextTrack(0);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayEosFlushRemove_TwoTracks) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ EXPECT_CALL(*this, OnEnd());
+ SendEosNotifications();
+ EXPECT_CALL(*this, OnPause());
+ Pause();
+ Flush();
+ ExpectRead(0);
+ ExpectRead(1);
+ Play();
+ SendEosNotification(0);
+ EXPECT_CALL(*this, OnEnd());
+ SendEosNotification(1);
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ RemoveTextTrack(1);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayEosFlushRemove_EosRemove) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ EXPECT_CALL(*this, OnEnd());
+ SendEosNotifications();
+ EXPECT_CALL(*this, OnPause());
+ Pause();
+ Flush();
+ ExpectRead(0);
+ ExpectRead(1);
+ Play();
+ SendEosNotification(0);
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ EXPECT_CALL(*this, OnEnd());
+ SendEosNotification(1);
+ RemoveTextTrack(1);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayShort_SendCueThenEos) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ Pause();
+ SendCue(0, true);
+ EXPECT_CALL(*this, OnPause());
+ SendEosNotification(1);
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayShort_EosThenSendCue) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ Pause();
+ SendEosNotification(0);
+ EXPECT_CALL(*this, OnPause());
+ SendCue(1, true);
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayShortRemove_SendEosRemove) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ Pause();
+ SendCue(0, true);
+ EXPECT_CALL(*this, OnPause());
+ SendEosNotification(1);
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ RemoveTextTrack(1);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayShortRemove_SendRemoveEos) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ Pause();
+ SendCue(0, true);
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ EXPECT_CALL(*this, OnPause());
+ SendEosNotification(1);
+ RemoveTextTrack(1);
+ EXPECT_FALSE(text_renderer_->HasTracks());
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayCuePausePendingCancel_OneTrack) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "", "", true);
+ Play();
+ Pause();
+ EXPECT_CALL(*this, OnPause());
+ AbortPendingRead(0);
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayCuePausePendingCancel_SendThenCancel) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ Pause();
+ SendCue(0, true);
+ EXPECT_CALL(*this, OnPause());
+ AbortPendingRead(1);
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayCuePausePendingCancel_CancelThenSend) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ Pause();
+ AbortPendingRead(0);
+ EXPECT_CALL(*this, OnPause());
+ SendCue(1, true);
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, PlayCueStopPendingCancel_OneTrack) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "", "", true);
+ Play();
+ Pause();
+ Stop();
+ EXPECT_CALL(*this, OnStop());
+ AbortPendingRead(0);
+ text_renderer_.reset();
+ text_track_streams_.clear();
+}
+
+TEST_F(TextRendererTest, PlayCueStopPendingCancel_SendThenCancel) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ Pause();
+ Stop();
+ SendCue(0, false);
+ EXPECT_CALL(*this, OnStop());
+ AbortPendingRead(1);
+ text_renderer_.reset();
+ text_track_streams_.clear();
+}
+
+TEST_F(TextRendererTest, PlayCueStopPendingCancel_CancelThenSend) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ Pause();
+ Stop();
+ AbortPendingRead(0);
+ EXPECT_CALL(*this, OnStop());
+ SendCue(1, false);
+ text_renderer_.reset();
+ text_track_streams_.clear();
+}
+
+TEST_F(TextRendererTest, AddRemoveAdd) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "", "", true);
+ Play();
+ AbortPendingRead(0);
+ RemoveTextTrack(0);
+ EXPECT_CALL(*this, OnPause());
+ Pause();
+ AddTextTrack(kTextSubtitles, "", "", true);
+ Play();
+ EXPECT_CALL(*this, OnEnd());
+ SendEosNotification(1);
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, AddRemoveEos) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ AbortPendingRead(0);
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ EXPECT_CALL(*this, OnEnd());
+ SendEosNotification(1);
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, AddRemovePause) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ AbortPendingRead(0);
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ Pause();
+ EXPECT_CALL(*this, OnPause());
+ SendEosNotification(1);
+ DestroyTextRenderer();
+}
+
+TEST_F(TextRendererTest, AddRemovePauseStop) {
+ CreateTextRenderer();
+ AddTextTrack(kTextSubtitles, "1", "", true);
+ AddTextTrack(kTextSubtitles, "2", "", true);
+ Play();
+ AbortPendingRead(0);
+ RemoveTextTrack(0);
+ EXPECT_TRUE(text_renderer_->HasTracks());
+ Pause();
+ Stop();
+ EXPECT_CALL(*this, OnStop());
+ SendEosNotification(1);
+ text_renderer_.reset();
+ text_track_streams_.clear();
+}
+
+} // namespace media
diff --git a/chromium/media/base/text_track.h b/chromium/media/base/text_track.h
index 01a2ed727f9..0e04a0eb46d 100644
--- a/chromium/media/base/text_track.h
+++ b/chromium/media/base/text_track.h
@@ -13,14 +13,7 @@
namespace media {
-// Specifies the varieties of text tracks.
-enum TextKind {
- kTextSubtitles,
- kTextCaptions,
- kTextDescriptions,
- kTextMetadata,
- kTextNone
-};
+class TextTrackConfig;
class TextTrack {
public:
@@ -32,10 +25,12 @@ class TextTrack {
const std::string& settings) = 0;
};
-typedef base::Callback<scoped_ptr<TextTrack>
- (TextKind kind,
- const std::string& label,
- const std::string& language)> AddTextTrackCB;
+typedef base::Callback<void
+ (scoped_ptr<TextTrack>)> AddTextTrackDoneCB;
+
+typedef base::Callback<void
+ (const TextTrackConfig& config,
+ const AddTextTrackDoneCB& done_cb)> AddTextTrackCB;
} // namespace media
diff --git a/chromium/media/base/text_track_config.cc b/chromium/media/base/text_track_config.cc
new file mode 100644
index 00000000000..0d4b11f6ddb
--- /dev/null
+++ b/chromium/media/base/text_track_config.cc
@@ -0,0 +1,30 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/text_track_config.h"
+
+namespace media {
+
+TextTrackConfig::TextTrackConfig()
+ : kind_(kTextNone) {
+}
+
+TextTrackConfig::TextTrackConfig(TextKind kind,
+ const std::string& label,
+ const std::string& language,
+ const std::string& id)
+ : kind_(kind),
+ label_(label),
+ language_(language),
+ id_(id) {
+}
+
+bool TextTrackConfig::Matches(const TextTrackConfig& config) const {
+ return config.kind() == kind_ &&
+ config.label() == label_ &&
+ config.language() == language_ &&
+ config.id() == id_;
+}
+
+} // namespace media
diff --git a/chromium/media/base/text_track_config.h b/chromium/media/base/text_track_config.h
new file mode 100644
index 00000000000..58efba4b035
--- /dev/null
+++ b/chromium/media/base/text_track_config.h
@@ -0,0 +1,48 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_TEXT_TRACK_CONFIG_H_
+#define MEDIA_BASE_TEXT_TRACK_CONFIG_H_
+
+#include <string>
+
+#include "media/base/media_export.h"
+
+namespace media {
+
+// Specifies the varieties of text tracks.
+enum TextKind {
+ kTextSubtitles,
+ kTextCaptions,
+ kTextDescriptions,
+ kTextMetadata,
+ kTextNone
+};
+
+class MEDIA_EXPORT TextTrackConfig {
+ public:
+ TextTrackConfig();
+ TextTrackConfig(TextKind kind,
+ const std::string& label,
+ const std::string& language,
+ const std::string& id);
+
+ // Returns true if all fields in |config| match this config.
+ bool Matches(const TextTrackConfig& config) const;
+
+ TextKind kind() const { return kind_; }
+ const std::string& label() const { return label_; }
+ const std::string& language() const { return language_; }
+ const std::string& id() const { return id_; }
+
+ private:
+ TextKind kind_;
+ std::string label_;
+ std::string language_;
+ std::string id_;
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_TEXT_TRACK_H_
diff --git a/chromium/media/base/user_input_monitor_linux.cc b/chromium/media/base/user_input_monitor_linux.cc
index b5dbbe5e0bb..70090eae4bb 100644
--- a/chromium/media/base/user_input_monitor_linux.cc
+++ b/chromium/media/base/user_input_monitor_linux.cc
@@ -18,7 +18,6 @@
#include "base/memory/weak_ptr.h"
#include "base/message_loop/message_loop.h"
#include "base/message_loop/message_pump_libevent.h"
-#include "base/posix/eintr_wrapper.h"
#include "base/single_thread_task_runner.h"
#include "base/synchronization/lock.h"
#include "media/base/keyboard_event_counter.h"
@@ -38,7 +37,8 @@ namespace {
// UserInputMonitorLinux since it needs to be deleted on the IO thread.
class UserInputMonitorLinuxCore
: public base::MessagePumpLibevent::Watcher,
- public base::SupportsWeakPtr<UserInputMonitorLinuxCore> {
+ public base::SupportsWeakPtr<UserInputMonitorLinuxCore>,
+ public base::MessageLoop::DestructionObserver {
public:
enum EventType {
MOUSE_EVENT,
@@ -51,6 +51,9 @@ class UserInputMonitorLinuxCore
mouse_listeners);
virtual ~UserInputMonitorLinuxCore();
+ // DestructionObserver overrides.
+ virtual void WillDestroyCurrentMessageLoop() OVERRIDE;
+
size_t GetKeyPressCount() const;
void StartMonitor(EventType type);
void StopMonitor(EventType type);
@@ -123,6 +126,12 @@ UserInputMonitorLinuxCore::~UserInputMonitorLinuxCore() {
DCHECK(!x_record_context_);
}
+void UserInputMonitorLinuxCore::WillDestroyCurrentMessageLoop() {
+ DCHECK(io_task_runner_->BelongsToCurrentThread());
+ StopMonitor(MOUSE_EVENT);
+ StopMonitor(KEYBOARD_EVENT);
+}
+
size_t UserInputMonitorLinuxCore::GetKeyPressCount() const {
return counter_.GetKeyPressCount();
}
@@ -146,6 +155,7 @@ void UserInputMonitorLinuxCore::StartMonitor(EventType type) {
if (!x_control_display_ || !x_record_display_) {
LOG(ERROR) << "Couldn't open X display";
+ StopMonitor(type);
return;
}
@@ -153,6 +163,7 @@ void UserInputMonitorLinuxCore::StartMonitor(EventType type) {
if (!XQueryExtension(
x_control_display_, "RECORD", &xr_opcode, &xr_event, &xr_error)) {
LOG(ERROR) << "X Record extension not available.";
+ StopMonitor(type);
return;
}
@@ -161,6 +172,7 @@ void UserInputMonitorLinuxCore::StartMonitor(EventType type) {
if (!x_record_range_[type]) {
LOG(ERROR) << "XRecordAllocRange failed.";
+ StopMonitor(type);
return;
}
@@ -193,6 +205,7 @@ void UserInputMonitorLinuxCore::StartMonitor(EventType type) {
number_of_ranges);
if (!x_record_context_) {
LOG(ERROR) << "XRecordCreateContext failed.";
+ StopMonitor(type);
return;
}
@@ -201,6 +214,7 @@ void UserInputMonitorLinuxCore::StartMonitor(EventType type) {
&UserInputMonitorLinuxCore::ProcessReply,
reinterpret_cast<XPointer>(this))) {
LOG(ERROR) << "XRecordEnableContextAsync failed.";
+ StopMonitor(type);
return;
}
@@ -216,8 +230,13 @@ void UserInputMonitorLinuxCore::StartMonitor(EventType type) {
this);
if (!result) {
LOG(ERROR) << "Failed to create X record task.";
+ StopMonitor(type);
return;
}
+
+ // Start observing message loop destruction if we start monitoring the first
+ // event.
+ base::MessageLoop::current()->AddDestructionObserver(this);
}
// Fetch pending events if any.
@@ -243,15 +262,17 @@ void UserInputMonitorLinuxCore::StopMonitor(EventType type) {
x_record_context_ = 0;
controller_.StopWatchingFileDescriptor();
- if (x_record_display_) {
- XCloseDisplay(x_record_display_);
- x_record_display_ = NULL;
- }
- if (x_control_display_) {
- XCloseDisplay(x_control_display_);
- x_control_display_ = NULL;
- }
}
+ if (x_record_display_) {
+ XCloseDisplay(x_record_display_);
+ x_record_display_ = NULL;
+ }
+ if (x_control_display_) {
+ XCloseDisplay(x_control_display_);
+ x_control_display_ = NULL;
+ }
+ // Stop observing message loop destruction if no event is being monitored.
+ base::MessageLoop::current()->RemoveDestructionObserver(this);
}
void UserInputMonitorLinuxCore::OnFileCanReadWithoutBlocking(int fd) {
diff --git a/chromium/media/base/user_input_monitor_win.cc b/chromium/media/base/user_input_monitor_win.cc
index 13b826f01eb..29cedc8b631 100644
--- a/chromium/media/base/user_input_monitor_win.cc
+++ b/chromium/media/base/user_input_monitor_win.cc
@@ -8,6 +8,7 @@
#include "base/location.h"
#include "base/logging.h"
#include "base/memory/weak_ptr.h"
+#include "base/message_loop/message_loop.h"
#include "base/single_thread_task_runner.h"
#include "base/strings/stringprintf.h"
#include "base/synchronization/lock.h"
@@ -27,7 +28,8 @@ const USHORT kKeyboardUsage = 6;
// This is the actual implementation of event monitoring. It's separated from
// UserInputMonitorWin since it needs to be deleted on the UI thread.
class UserInputMonitorWinCore
- : public base::SupportsWeakPtr<UserInputMonitorWinCore> {
+ : public base::SupportsWeakPtr<UserInputMonitorWinCore>,
+ public base::MessageLoop::DestructionObserver {
public:
enum EventBitMask {
MOUSE_EVENT_MASK = 1,
@@ -40,6 +42,9 @@ class UserInputMonitorWinCore
mouse_listeners);
~UserInputMonitorWinCore();
+ // DestructionObserver overrides.
+ virtual void WillDestroyCurrentMessageLoop() OVERRIDE;
+
size_t GetKeyPressCount() const;
void StartMonitor(EventBitMask type);
void StopMonitor(EventBitMask type);
@@ -101,6 +106,12 @@ UserInputMonitorWinCore::~UserInputMonitorWinCore() {
DCHECK(!events_monitored_);
}
+void UserInputMonitorWinCore::WillDestroyCurrentMessageLoop() {
+ DCHECK(ui_task_runner_->BelongsToCurrentThread());
+ StopMonitor(MOUSE_EVENT_MASK);
+ StopMonitor(KEYBOARD_EVENT_MASK);
+}
+
size_t UserInputMonitorWinCore::GetKeyPressCount() const {
return counter_.GetKeyPressCount();
}
@@ -129,8 +140,15 @@ void UserInputMonitorWinCore::StartMonitor(EventBitMask type) {
if (!RegisterRawInputDevices(device.get(), 1, sizeof(*device))) {
LOG_GETLASTERROR(ERROR)
<< "RegisterRawInputDevices() failed for RIDEV_INPUTSINK";
+ window_.reset();
return;
}
+
+ // Start observing message loop destruction if we start monitoring the first
+ // event.
+ if (!events_monitored_)
+ base::MessageLoop::current()->AddDestructionObserver(this);
+
events_monitored_ |= type;
}
@@ -150,8 +168,12 @@ void UserInputMonitorWinCore::StopMonitor(EventBitMask type) {
}
events_monitored_ &= ~type;
- if (events_monitored_ == 0)
+ if (events_monitored_ == 0) {
window_.reset();
+
+ // Stop observing message loop destruction if no event is being monitored.
+ base::MessageLoop::current()->RemoveDestructionObserver(this);
+ }
}
LRESULT UserInputMonitorWinCore::OnInput(HRAWINPUT input_handle) {
diff --git a/chromium/media/base/vector_math.cc b/chromium/media/base/vector_math.cc
index de946ca8cbf..32584f5cf64 100644
--- a/chromium/media/base/vector_math.cc
+++ b/chromium/media/base/vector_math.cc
@@ -5,6 +5,8 @@
#include "media/base/vector_math.h"
#include "media/base/vector_math_testing.h"
+#include <algorithm>
+
#include "base/cpu.h"
#include "base/logging.h"
#include "build/build_config.h"
@@ -23,33 +25,42 @@ namespace vector_math {
#if defined(__SSE__)
#define FMAC_FUNC FMAC_SSE
#define FMUL_FUNC FMUL_SSE
+#define EWMAAndMaxPower_FUNC EWMAAndMaxPower_SSE
void Initialize() {}
#else
// X86 CPU detection required. Functions will be set by Initialize().
// TODO(dalecurtis): Once Chrome moves to an SSE baseline this can be removed.
#define FMAC_FUNC g_fmac_proc_
#define FMUL_FUNC g_fmul_proc_
+#define EWMAAndMaxPower_FUNC g_ewma_power_proc_
typedef void (*MathProc)(const float src[], float scale, int len, float dest[]);
static MathProc g_fmac_proc_ = NULL;
static MathProc g_fmul_proc_ = NULL;
+typedef std::pair<float, float> (*EWMAAndMaxPowerProc)(
+ float initial_value, const float src[], int len, float smoothing_factor);
+static EWMAAndMaxPowerProc g_ewma_power_proc_ = NULL;
void Initialize() {
CHECK(!g_fmac_proc_);
CHECK(!g_fmul_proc_);
+ CHECK(!g_ewma_power_proc_);
const bool kUseSSE = base::CPU().has_sse();
g_fmac_proc_ = kUseSSE ? FMAC_SSE : FMAC_C;
g_fmul_proc_ = kUseSSE ? FMUL_SSE : FMUL_C;
+ g_ewma_power_proc_ = kUseSSE ? EWMAAndMaxPower_SSE : EWMAAndMaxPower_C;
}
#endif
#elif defined(ARCH_CPU_ARM_FAMILY) && defined(USE_NEON)
#define FMAC_FUNC FMAC_NEON
#define FMUL_FUNC FMUL_NEON
+#define EWMAAndMaxPower_FUNC EWMAAndMaxPower_NEON
void Initialize() {}
#else
// Unknown architecture.
#define FMAC_FUNC FMAC_C
#define FMUL_FUNC FMUL_C
+#define EWMAAndMaxPower_FUNC EWMAAndMaxPower_C
void Initialize() {}
#endif
@@ -77,6 +88,27 @@ void FMUL_C(const float src[], float scale, int len, float dest[]) {
dest[i] = src[i] * scale;
}
+std::pair<float, float> EWMAAndMaxPower(
+ float initial_value, const float src[], int len, float smoothing_factor) {
+ // Ensure |src| is 16-byte aligned.
+ DCHECK_EQ(0u, reinterpret_cast<uintptr_t>(src) & (kRequiredAlignment - 1));
+ return EWMAAndMaxPower_FUNC(initial_value, src, len, smoothing_factor);
+}
+
+std::pair<float, float> EWMAAndMaxPower_C(
+ float initial_value, const float src[], int len, float smoothing_factor) {
+ std::pair<float, float> result(initial_value, 0.0f);
+ const float weight_prev = 1.0f - smoothing_factor;
+ for (int i = 0; i < len; ++i) {
+ result.first *= weight_prev;
+ const float sample = src[i];
+ const float sample_squared = sample * sample;
+ result.first += sample_squared * smoothing_factor;
+ result.second = std::max(result.second, sample_squared);
+ }
+ return result;
+}
+
#if defined(ARCH_CPU_ARM_FAMILY) && defined(USE_NEON)
void FMAC_NEON(const float src[], float scale, int len, float dest[]) {
const int rem = len % 4;
@@ -103,6 +135,71 @@ void FMUL_NEON(const float src[], float scale, int len, float dest[]) {
for (int i = last_index; i < len; ++i)
dest[i] = src[i] * scale;
}
+
+std::pair<float, float> EWMAAndMaxPower_NEON(
+ float initial_value, const float src[], int len, float smoothing_factor) {
+ // When the recurrence is unrolled, we see that we can split it into 4
+ // separate lanes of evaluation:
+ //
+ // y[n] = a(S[n]^2) + (1-a)(y[n-1])
+ // = a(S[n]^2) + (1-a)^1(aS[n-1]^2) + (1-a)^2(aS[n-2]^2) + ...
+ // = z[n] + (1-a)^1(z[n-1]) + (1-a)^2(z[n-2]) + (1-a)^3(z[n-3])
+ //
+ // where z[n] = a(S[n]^2) + (1-a)^4(z[n-4]) + (1-a)^8(z[n-8]) + ...
+ //
+ // Thus, the strategy here is to compute z[n], z[n-1], z[n-2], and z[n-3] in
+ // each of the 4 lanes, and then combine them to give y[n].
+
+ const int rem = len % 4;
+ const int last_index = len - rem;
+
+ const float32x4_t smoothing_factor_x4 = vdupq_n_f32(smoothing_factor);
+ const float weight_prev = 1.0f - smoothing_factor;
+ const float32x4_t weight_prev_x4 = vdupq_n_f32(weight_prev);
+ const float32x4_t weight_prev_squared_x4 =
+ vmulq_f32(weight_prev_x4, weight_prev_x4);
+ const float32x4_t weight_prev_4th_x4 =
+ vmulq_f32(weight_prev_squared_x4, weight_prev_squared_x4);
+
+ // Compute z[n], z[n-1], z[n-2], and z[n-3] in parallel in lanes 3, 2, 1 and
+ // 0, respectively.
+ float32x4_t max_x4 = vdupq_n_f32(0.0f);
+ float32x4_t ewma_x4 = vsetq_lane_f32(initial_value, vdupq_n_f32(0.0f), 3);
+ int i;
+ for (i = 0; i < last_index; i += 4) {
+ ewma_x4 = vmulq_f32(ewma_x4, weight_prev_4th_x4);
+ const float32x4_t sample_x4 = vld1q_f32(src + i);
+ const float32x4_t sample_squared_x4 = vmulq_f32(sample_x4, sample_x4);
+ max_x4 = vmaxq_f32(max_x4, sample_squared_x4);
+ ewma_x4 = vmlaq_f32(ewma_x4, sample_squared_x4, smoothing_factor_x4);
+ }
+
+ // y[n] = z[n] + (1-a)^1(z[n-1]) + (1-a)^2(z[n-2]) + (1-a)^3(z[n-3])
+ float ewma = vgetq_lane_f32(ewma_x4, 3);
+ ewma_x4 = vmulq_f32(ewma_x4, weight_prev_x4);
+ ewma += vgetq_lane_f32(ewma_x4, 2);
+ ewma_x4 = vmulq_f32(ewma_x4, weight_prev_x4);
+ ewma += vgetq_lane_f32(ewma_x4, 1);
+ ewma_x4 = vmulq_f32(ewma_x4, weight_prev_x4);
+ ewma += vgetq_lane_f32(ewma_x4, 0);
+
+ // Fold the maximums together to get the overall maximum.
+ float32x2_t max_x2 = vpmax_f32(vget_low_f32(max_x4), vget_high_f32(max_x4));
+ max_x2 = vpmax_f32(max_x2, max_x2);
+
+ std::pair<float, float> result(ewma, vget_lane_f32(max_x2, 0));
+
+ // Handle remaining values at the end of |src|.
+ for (; i < len; ++i) {
+ result.first *= weight_prev;
+ const float sample = src[i];
+ const float sample_squared = sample * sample;
+ result.first += sample_squared * smoothing_factor;
+ result.second = std::max(result.second, sample_squared);
+ }
+
+ return result;
+}
#endif
} // namespace vector_math
diff --git a/chromium/media/base/vector_math.h b/chromium/media/base/vector_math.h
index 4764f0b7e3e..a4dea372898 100644
--- a/chromium/media/base/vector_math.h
+++ b/chromium/media/base/vector_math.h
@@ -5,6 +5,8 @@
#ifndef MEDIA_BASE_VECTOR_MATH_H_
#define MEDIA_BASE_VECTOR_MATH_H_
+#include <utility>
+
#include "media/base/media_export.h"
namespace media {
@@ -26,6 +28,16 @@ MEDIA_EXPORT void FMAC(const float src[], float scale, int len, float dest[]);
// |dest| must be aligned by kRequiredAlignment.
MEDIA_EXPORT void FMUL(const float src[], float scale, int len, float dest[]);
+// Computes the exponentially-weighted moving average power of a signal by
+// iterating the recurrence:
+//
+// y[-1] = initial_value
+// y[n] = smoothing_factor * src[n]^2 + (1-smoothing_factor) * y[n-1]
+//
+// Returns the final average power and the maximum squared element value.
+MEDIA_EXPORT std::pair<float, float> EWMAAndMaxPower(
+ float initial_value, const float src[], int len, float smoothing_factor);
+
} // namespace vector_math
} // namespace media
diff --git a/chromium/media/base/vector_math_perftest.cc b/chromium/media/base/vector_math_perftest.cc
new file mode 100644
index 00000000000..9742f2e9534
--- /dev/null
+++ b/chromium/media/base/vector_math_perftest.cc
@@ -0,0 +1,181 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/cpu.h"
+#include "base/memory/aligned_memory.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/time/time.h"
+#include "media/base/vector_math.h"
+#include "media/base/vector_math_testing.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/perf/perf_test.h"
+
+using base::TimeTicks;
+using std::fill;
+
+namespace media {
+
+static const int kBenchmarkIterations = 200000;
+static const int kEWMABenchmarkIterations = 50000;
+static const float kScale = 0.5;
+static const int kVectorSize = 8192;
+
+class VectorMathPerfTest : public testing::Test {
+ public:
+ VectorMathPerfTest() {
+ // Initialize input and output vectors.
+ input_vector_.reset(static_cast<float*>(base::AlignedAlloc(
+ sizeof(float) * kVectorSize, vector_math::kRequiredAlignment)));
+ output_vector_.reset(static_cast<float*>(base::AlignedAlloc(
+ sizeof(float) * kVectorSize, vector_math::kRequiredAlignment)));
+ fill(input_vector_.get(), input_vector_.get() + kVectorSize, 1.0f);
+ fill(output_vector_.get(), output_vector_.get() + kVectorSize, 0.0f);
+ }
+
+ void RunBenchmark(void (*fn)(const float[], float, int, float[]),
+ bool aligned,
+ const std::string& test_name,
+ const std::string& trace_name) {
+ TimeTicks start = TimeTicks::HighResNow();
+ for (int i = 0; i < kBenchmarkIterations; ++i) {
+ fn(input_vector_.get(),
+ kScale,
+ kVectorSize - (aligned ? 0 : 1),
+ output_vector_.get());
+ }
+ double total_time_milliseconds =
+ (TimeTicks::HighResNow() - start).InMillisecondsF();
+ perf_test::PrintResult(test_name,
+ "",
+ trace_name,
+ kBenchmarkIterations / total_time_milliseconds,
+ "runs/ms",
+ true);
+ }
+
+ void RunBenchmark(
+ std::pair<float, float> (*fn)(float, const float[], int, float),
+ int len,
+ const std::string& test_name,
+ const std::string& trace_name) {
+ TimeTicks start = TimeTicks::HighResNow();
+ for (int i = 0; i < kEWMABenchmarkIterations; ++i) {
+ fn(0.5f, input_vector_.get(), len, 0.1f);
+ }
+ double total_time_milliseconds =
+ (TimeTicks::HighResNow() - start).InMillisecondsF();
+ perf_test::PrintResult(test_name,
+ "",
+ trace_name,
+ kEWMABenchmarkIterations / total_time_milliseconds,
+ "runs/ms",
+ true);
+ }
+
+ protected:
+ scoped_ptr_malloc<float, base::ScopedPtrAlignedFree> input_vector_;
+ scoped_ptr_malloc<float, base::ScopedPtrAlignedFree> output_vector_;
+
+ DISALLOW_COPY_AND_ASSIGN(VectorMathPerfTest);
+};
+
+// Define platform independent function name for FMAC* perf tests.
+#if defined(ARCH_CPU_X86_FAMILY)
+#define FMAC_FUNC FMAC_SSE
+#elif defined(ARCH_CPU_ARM_FAMILY) && defined(USE_NEON)
+#define FMAC_FUNC FMAC_NEON
+#endif
+
+// Benchmark for each optimized vector_math::FMAC() method.
+TEST_F(VectorMathPerfTest, FMAC) {
+ // Benchmark FMAC_C().
+ RunBenchmark(
+ vector_math::FMAC_C, true, "vector_math_fmac", "unoptimized");
+#if defined(FMAC_FUNC)
+#if defined(ARCH_CPU_X86_FAMILY)
+ ASSERT_TRUE(base::CPU().has_sse());
+#endif
+ // Benchmark FMAC_FUNC() with unaligned size.
+ ASSERT_NE((kVectorSize - 1) % (vector_math::kRequiredAlignment /
+ sizeof(float)), 0U);
+ RunBenchmark(
+ vector_math::FMAC_FUNC, false, "vector_math_fmac", "optimized_unaligned");
+ // Benchmark FMAC_FUNC() with aligned size.
+ ASSERT_EQ(kVectorSize % (vector_math::kRequiredAlignment / sizeof(float)),
+ 0U);
+ RunBenchmark(
+ vector_math::FMAC_FUNC, true, "vector_math_fmac", "optimized_aligned");
+#endif
+}
+
+#undef FMAC_FUNC
+
+// Define platform independent function name for FMULBenchmark* tests.
+#if defined(ARCH_CPU_X86_FAMILY)
+#define FMUL_FUNC FMUL_SSE
+#elif defined(ARCH_CPU_ARM_FAMILY) && defined(USE_NEON)
+#define FMUL_FUNC FMUL_NEON
+#endif
+
+// Benchmark for each optimized vector_math::FMUL() method.
+TEST_F(VectorMathPerfTest, FMUL) {
+ // Benchmark FMUL_C().
+ RunBenchmark(
+ vector_math::FMUL_C, true, "vector_math_fmul", "unoptimized");
+#if defined(FMUL_FUNC)
+#if defined(ARCH_CPU_X86_FAMILY)
+ ASSERT_TRUE(base::CPU().has_sse());
+#endif
+ // Benchmark FMUL_FUNC() with unaligned size.
+ ASSERT_NE((kVectorSize - 1) % (vector_math::kRequiredAlignment /
+ sizeof(float)), 0U);
+ RunBenchmark(
+ vector_math::FMUL_FUNC, false, "vector_math_fmul", "optimized_unaligned");
+ // Benchmark FMUL_FUNC() with aligned size.
+ ASSERT_EQ(kVectorSize % (vector_math::kRequiredAlignment / sizeof(float)),
+ 0U);
+ RunBenchmark(
+ vector_math::FMUL_FUNC, true, "vector_math_fmul", "optimized_aligned");
+#endif
+}
+
+#undef FMUL_FUNC
+
+#if defined(ARCH_CPU_X86_FAMILY)
+#define EWMAAndMaxPower_FUNC EWMAAndMaxPower_SSE
+#elif defined(ARCH_CPU_ARM_FAMILY) && defined(USE_NEON)
+#define EWMAAndMaxPower_FUNC EWMAAndMaxPower_NEON
+#endif
+
+// Benchmark for each optimized vector_math::EWMAAndMaxPower() method.
+TEST_F(VectorMathPerfTest, EWMAAndMaxPower) {
+ // Benchmark EWMAAndMaxPower_C().
+ RunBenchmark(vector_math::EWMAAndMaxPower_C,
+ kVectorSize,
+ "vector_math_ewma_and_max_power",
+ "unoptimized");
+#if defined(EWMAAndMaxPower_FUNC)
+#if defined(ARCH_CPU_X86_FAMILY)
+ ASSERT_TRUE(base::CPU().has_sse());
+#endif
+ // Benchmark EWMAAndMaxPower_FUNC() with unaligned size.
+ ASSERT_NE((kVectorSize - 1) % (vector_math::kRequiredAlignment /
+ sizeof(float)), 0U);
+ RunBenchmark(vector_math::EWMAAndMaxPower_FUNC,
+ kVectorSize - 1,
+ "vector_math_ewma_and_max_power",
+ "optimized_unaligned");
+ // Benchmark EWMAAndMaxPower_FUNC() with aligned size.
+ ASSERT_EQ(kVectorSize % (vector_math::kRequiredAlignment / sizeof(float)),
+ 0U);
+ RunBenchmark(vector_math::EWMAAndMaxPower_FUNC,
+ kVectorSize,
+ "vector_math_ewma_and_max_power",
+ "optimized_aligned");
+#endif
+}
+
+#undef EWMAAndMaxPower_FUNC
+
+} // namespace media
diff --git a/chromium/media/base/vector_math_testing.h b/chromium/media/base/vector_math_testing.h
index 02d14f807c5..b0b304409dd 100644
--- a/chromium/media/base/vector_math_testing.h
+++ b/chromium/media/base/vector_math_testing.h
@@ -5,6 +5,8 @@
#ifndef MEDIA_BASE_VECTOR_MATH_TESTING_H_
#define MEDIA_BASE_VECTOR_MATH_TESTING_H_
+#include <utility>
+
#include "build/build_config.h"
#include "media/base/media_export.h"
@@ -14,12 +16,16 @@ namespace vector_math {
// Optimized versions exposed for testing. See vector_math.h for details.
MEDIA_EXPORT void FMAC_C(const float src[], float scale, int len, float dest[]);
MEDIA_EXPORT void FMUL_C(const float src[], float scale, int len, float dest[]);
+MEDIA_EXPORT std::pair<float, float> EWMAAndMaxPower_C(
+ float initial_value, const float src[], int len, float smoothing_factor);
#if defined(ARCH_CPU_X86_FAMILY)
MEDIA_EXPORT void FMAC_SSE(const float src[], float scale, int len,
float dest[]);
MEDIA_EXPORT void FMUL_SSE(const float src[], float scale, int len,
float dest[]);
+MEDIA_EXPORT std::pair<float, float> EWMAAndMaxPower_SSE(
+ float initial_value, const float src[], int len, float smoothing_factor);
#endif
#if defined(ARCH_CPU_ARM_FAMILY) && defined(USE_NEON)
@@ -27,6 +33,8 @@ MEDIA_EXPORT void FMAC_NEON(const float src[], float scale, int len,
float dest[]);
MEDIA_EXPORT void FMUL_NEON(const float src[], float scale, int len,
float dest[]);
+MEDIA_EXPORT std::pair<float, float> EWMAAndMaxPower_NEON(
+ float initial_value, const float src[], int len, float smoothing_factor);
#endif
} // namespace vector_math
diff --git a/chromium/media/base/vector_math_unittest.cc b/chromium/media/base/vector_math_unittest.cc
index 2c7740142cb..f8278ce1b5d 100644
--- a/chromium/media/base/vector_math_unittest.cc
+++ b/chromium/media/base/vector_math_unittest.cc
@@ -6,68 +6,50 @@
#define _USE_MATH_DEFINES
#include <cmath>
-#include "base/command_line.h"
#include "base/cpu.h"
#include "base/memory/aligned_memory.h"
#include "base/memory/scoped_ptr.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/stringize_macros.h"
-#include "base/time/time.h"
#include "media/base/vector_math.h"
#include "media/base/vector_math_testing.h"
#include "testing/gtest/include/gtest/gtest.h"
-using base::TimeTicks;
using std::fill;
-// Command line switch for runtime adjustment of benchmark iterations.
-static const char kBenchmarkIterations[] = "vector-math-iterations";
-static const int kDefaultIterations = 10;
+namespace media {
// Default test values.
static const float kScale = 0.5;
static const float kInputFillValue = 1.0;
static const float kOutputFillValue = 3.0;
-
-namespace media {
+static const int kVectorSize = 8192;
class VectorMathTest : public testing::Test {
public:
- static const int kVectorSize = 8192;
VectorMathTest() {
// Initialize input and output vectors.
- input_vector.reset(static_cast<float*>(base::AlignedAlloc(
+ input_vector_.reset(static_cast<float*>(base::AlignedAlloc(
sizeof(float) * kVectorSize, vector_math::kRequiredAlignment)));
- output_vector.reset(static_cast<float*>(base::AlignedAlloc(
+ output_vector_.reset(static_cast<float*>(base::AlignedAlloc(
sizeof(float) * kVectorSize, vector_math::kRequiredAlignment)));
}
void FillTestVectors(float input, float output) {
// Setup input and output vectors.
- fill(input_vector.get(), input_vector.get() + kVectorSize, input);
- fill(output_vector.get(), output_vector.get() + kVectorSize, output);
+ fill(input_vector_.get(), input_vector_.get() + kVectorSize, input);
+ fill(output_vector_.get(), output_vector_.get() + kVectorSize, output);
}
void VerifyOutput(float value) {
for (int i = 0; i < kVectorSize; ++i)
- ASSERT_FLOAT_EQ(output_vector.get()[i], value);
- }
-
- int BenchmarkIterations() {
- int vector_math_iterations = kDefaultIterations;
- std::string iterations(
- CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
- kBenchmarkIterations));
- if (!iterations.empty())
- base::StringToInt(iterations, &vector_math_iterations);
- return vector_math_iterations;
+ ASSERT_FLOAT_EQ(output_vector_.get()[i], value);
}
protected:
- int benchmark_iterations;
- scoped_ptr_malloc<float, base::ScopedPtrAlignedFree> input_vector;
- scoped_ptr_malloc<float, base::ScopedPtrAlignedFree> output_vector;
+ scoped_ptr_malloc<float, base::ScopedPtrAlignedFree> input_vector_;
+ scoped_ptr_malloc<float, base::ScopedPtrAlignedFree> output_vector_;
DISALLOW_COPY_AND_ASSIGN(VectorMathTest);
};
@@ -80,7 +62,7 @@ TEST_F(VectorMathTest, FMAC) {
SCOPED_TRACE("FMAC");
FillTestVectors(kInputFillValue, kOutputFillValue);
vector_math::FMAC(
- input_vector.get(), kScale, kVectorSize, output_vector.get());
+ input_vector_.get(), kScale, kVectorSize, output_vector_.get());
VerifyOutput(kResult);
}
@@ -88,7 +70,7 @@ TEST_F(VectorMathTest, FMAC) {
SCOPED_TRACE("FMAC_C");
FillTestVectors(kInputFillValue, kOutputFillValue);
vector_math::FMAC_C(
- input_vector.get(), kScale, kVectorSize, output_vector.get());
+ input_vector_.get(), kScale, kVectorSize, output_vector_.get());
VerifyOutput(kResult);
}
@@ -98,7 +80,7 @@ TEST_F(VectorMathTest, FMAC) {
SCOPED_TRACE("FMAC_SSE");
FillTestVectors(kInputFillValue, kOutputFillValue);
vector_math::FMAC_SSE(
- input_vector.get(), kScale, kVectorSize, output_vector.get());
+ input_vector_.get(), kScale, kVectorSize, output_vector_.get());
VerifyOutput(kResult);
}
#endif
@@ -108,7 +90,7 @@ TEST_F(VectorMathTest, FMAC) {
SCOPED_TRACE("FMAC_NEON");
FillTestVectors(kInputFillValue, kOutputFillValue);
vector_math::FMAC_NEON(
- input_vector.get(), kScale, kVectorSize, output_vector.get());
+ input_vector_.get(), kScale, kVectorSize, output_vector_.get());
VerifyOutput(kResult);
}
#endif
@@ -122,7 +104,7 @@ TEST_F(VectorMathTest, FMUL) {
SCOPED_TRACE("FMUL");
FillTestVectors(kInputFillValue, kOutputFillValue);
vector_math::FMUL(
- input_vector.get(), kScale, kVectorSize, output_vector.get());
+ input_vector_.get(), kScale, kVectorSize, output_vector_.get());
VerifyOutput(kResult);
}
@@ -130,7 +112,7 @@ TEST_F(VectorMathTest, FMUL) {
SCOPED_TRACE("FMUL_C");
FillTestVectors(kInputFillValue, kOutputFillValue);
vector_math::FMUL_C(
- input_vector.get(), kScale, kVectorSize, output_vector.get());
+ input_vector_.get(), kScale, kVectorSize, output_vector_.get());
VerifyOutput(kResult);
}
@@ -140,7 +122,7 @@ TEST_F(VectorMathTest, FMUL) {
SCOPED_TRACE("FMUL_SSE");
FillTestVectors(kInputFillValue, kOutputFillValue);
vector_math::FMUL_SSE(
- input_vector.get(), kScale, kVectorSize, output_vector.get());
+ input_vector_.get(), kScale, kVectorSize, output_vector_.get());
VerifyOutput(kResult);
}
#endif
@@ -150,142 +132,254 @@ TEST_F(VectorMathTest, FMUL) {
SCOPED_TRACE("FMUL_NEON");
FillTestVectors(kInputFillValue, kOutputFillValue);
vector_math::FMUL_NEON(
- input_vector.get(), kScale, kVectorSize, output_vector.get());
+ input_vector_.get(), kScale, kVectorSize, output_vector_.get());
VerifyOutput(kResult);
}
#endif
}
-// Define platform independent function name for FMACBenchmark* tests.
-#if defined(ARCH_CPU_X86_FAMILY)
-#define FMAC_FUNC FMAC_SSE
-#elif defined(ARCH_CPU_ARM_FAMILY) && defined(USE_NEON)
-#define FMAC_FUNC FMAC_NEON
-#endif
+namespace {
-// Benchmark for each optimized vector_math::FMAC() method. Original benchmarks
-// were run with --vector-fmac-iterations=200000.
-TEST_F(VectorMathTest, FMACBenchmark) {
- static const int kBenchmarkIterations = BenchmarkIterations();
-
- printf("Benchmarking %d iterations:\n", kBenchmarkIterations);
+class EWMATestScenario {
+ public:
+ EWMATestScenario(float initial_value, const float src[], int len,
+ float smoothing_factor)
+ : initial_value_(initial_value),
+ data_(static_cast<float*>(
+ len == 0 ? NULL :
+ base::AlignedAlloc(len * sizeof(float),
+ vector_math::kRequiredAlignment))),
+ data_len_(len),
+ smoothing_factor_(smoothing_factor),
+ expected_final_avg_(initial_value),
+ expected_max_(0.0f) {
+ if (data_len_ > 0)
+ memcpy(data_.get(), src, len * sizeof(float));
+ }
- // Benchmark FMAC_C().
- FillTestVectors(kInputFillValue, kOutputFillValue);
- TimeTicks start = TimeTicks::HighResNow();
- for (int i = 0; i < kBenchmarkIterations; ++i) {
- vector_math::FMAC_C(
- input_vector.get(), kScale, kVectorSize, output_vector.get());
+ // Copy constructor and assignment operator for ::testing::Values(...).
+ EWMATestScenario(const EWMATestScenario& other) { *this = other; }
+ EWMATestScenario& operator=(const EWMATestScenario& other) {
+ this->initial_value_ = other.initial_value_;
+ this->smoothing_factor_ = other.smoothing_factor_;
+ if (other.data_len_ == 0) {
+ this->data_.reset();
+ } else {
+ this->data_.reset(static_cast<float*>(
+ base::AlignedAlloc(other.data_len_ * sizeof(float),
+ vector_math::kRequiredAlignment)));
+ memcpy(this->data_.get(), other.data_.get(),
+ other.data_len_ * sizeof(float));
+ }
+ this->data_len_ = other.data_len_;
+ this->expected_final_avg_ = other.expected_final_avg_;
+ this->expected_max_ = other.expected_max_;
+ return *this;
}
- double total_time_c_ms = (TimeTicks::HighResNow() - start).InMillisecondsF();
- printf("FMAC_C took %.2fms.\n", total_time_c_ms);
-#if defined(FMAC_FUNC)
-#if defined(ARCH_CPU_X86_FAMILY)
- ASSERT_TRUE(base::CPU().has_sse());
-#endif
+ EWMATestScenario ScaledBy(float scale) const {
+ EWMATestScenario result(*this);
+ float* p = result.data_.get();
+ float* const p_end = p + result.data_len_;
+ for (; p < p_end; ++p)
+ *p *= scale;
+ return result;
+ }
- // Benchmark FMAC_FUNC() with unaligned size.
- ASSERT_NE((kVectorSize - 1) % (vector_math::kRequiredAlignment /
- sizeof(float)), 0U);
- FillTestVectors(kInputFillValue, kOutputFillValue);
- start = TimeTicks::HighResNow();
- for (int j = 0; j < kBenchmarkIterations; ++j) {
- vector_math::FMAC_FUNC(
- input_vector.get(), kScale, kVectorSize - 1, output_vector.get());
+ EWMATestScenario WithImpulse(float value, int offset) const {
+ EWMATestScenario result(*this);
+ result.data_.get()[offset] = value;
+ return result;
}
- double total_time_optimized_unaligned_ms =
- (TimeTicks::HighResNow() - start).InMillisecondsF();
- printf(STRINGIZE(FMAC_FUNC) " (unaligned size) took %.2fms; which is %.2fx "
- "faster than FMAC_C.\n", total_time_optimized_unaligned_ms,
- total_time_c_ms / total_time_optimized_unaligned_ms);
-
- // Benchmark FMAC_FUNC() with aligned size.
- ASSERT_EQ(kVectorSize % (vector_math::kRequiredAlignment / sizeof(float)),
- 0U);
- FillTestVectors(kInputFillValue, kOutputFillValue);
- start = TimeTicks::HighResNow();
- for (int j = 0; j < kBenchmarkIterations; ++j) {
- vector_math::FMAC_FUNC(
- input_vector.get(), kScale, kVectorSize, output_vector.get());
+
+ EWMATestScenario HasExpectedResult(float final_avg_value,
+ float max_value) const {
+ EWMATestScenario result(*this);
+ result.expected_final_avg_ = final_avg_value;
+ result.expected_max_ = max_value;
+ return result;
}
- double total_time_optimized_aligned_ms =
- (TimeTicks::HighResNow() - start).InMillisecondsF();
- printf(STRINGIZE(FMAC_FUNC) " (aligned) took %.2fms; which is %.2fx "
- "faster than FMAC_C and %.2fx faster than "
- STRINGIZE(FMAC_FUNC) " (unaligned).\n",
- total_time_optimized_aligned_ms,
- total_time_c_ms / total_time_optimized_aligned_ms,
- total_time_optimized_unaligned_ms / total_time_optimized_aligned_ms);
-#endif
-}
-#undef FMAC_FUNC
+ void RunTest() const {
+ {
+ SCOPED_TRACE("EWMAAndMaxPower");
+ const std::pair<float, float>& result = vector_math::EWMAAndMaxPower(
+ initial_value_, data_.get(), data_len_, smoothing_factor_);
+ EXPECT_NEAR(expected_final_avg_, result.first, 0.0000001f);
+ EXPECT_NEAR(expected_max_, result.second, 0.0000001f);
+ }
+
+ {
+ SCOPED_TRACE("EWMAAndMaxPower_C");
+ const std::pair<float, float>& result = vector_math::EWMAAndMaxPower_C(
+ initial_value_, data_.get(), data_len_, smoothing_factor_);
+ EXPECT_NEAR(expected_final_avg_, result.first, 0.0000001f);
+ EXPECT_NEAR(expected_max_, result.second, 0.0000001f);
+ }
-// Define platform independent function name for FMULBenchmark* tests.
#if defined(ARCH_CPU_X86_FAMILY)
-#define FMUL_FUNC FMUL_SSE
-#elif defined(ARCH_CPU_ARM_FAMILY) && defined(USE_NEON)
-#define FMUL_FUNC FMUL_NEON
+ {
+ ASSERT_TRUE(base::CPU().has_sse());
+ SCOPED_TRACE("EWMAAndMaxPower_SSE");
+ const std::pair<float, float>& result = vector_math::EWMAAndMaxPower_SSE(
+ initial_value_, data_.get(), data_len_, smoothing_factor_);
+ EXPECT_NEAR(expected_final_avg_, result.first, 0.0000001f);
+ EXPECT_NEAR(expected_max_, result.second, 0.0000001f);
+ }
#endif
-// Benchmark for each optimized vector_math::FMUL() method. Original benchmarks
-// were run with --vector-math-iterations=200000.
-TEST_F(VectorMathTest, FMULBenchmark) {
- static const int kBenchmarkIterations = BenchmarkIterations();
+#if defined(ARCH_CPU_ARM_FAMILY) && defined(USE_NEON)
+ {
+ SCOPED_TRACE("EWMAAndMaxPower_NEON");
+ const std::pair<float, float>& result = vector_math::EWMAAndMaxPower_NEON(
+ initial_value_, data_.get(), data_len_, smoothing_factor_);
+ EXPECT_NEAR(expected_final_avg_, result.first, 0.0000001f);
+ EXPECT_NEAR(expected_max_, result.second, 0.0000001f);
+ }
+#endif
+ }
- printf("Benchmarking %d iterations:\n", kBenchmarkIterations);
+ private:
+ float initial_value_;
+ scoped_ptr_malloc<float, base::ScopedPtrAlignedFree> data_;
+ int data_len_;
+ float smoothing_factor_;
+ float expected_final_avg_;
+ float expected_max_;
+};
- // Benchmark FMUL_C().
- FillTestVectors(kInputFillValue, kOutputFillValue);
- TimeTicks start = TimeTicks::HighResNow();
- for (int i = 0; i < kBenchmarkIterations; ++i) {
- vector_math::FMUL_C(
- input_vector.get(), kScale, kVectorSize, output_vector.get());
- }
- double total_time_c_ms = (TimeTicks::HighResNow() - start).InMillisecondsF();
- printf("FMUL_C took %.2fms.\n", total_time_c_ms);
+} // namespace
-#if defined(FMUL_FUNC)
-#if defined(ARCH_CPU_X86_FAMILY)
- ASSERT_TRUE(base::CPU().has_sse());
-#endif
+typedef testing::TestWithParam<EWMATestScenario> VectorMathEWMAAndMaxPowerTest;
- // Benchmark FMUL_SSE() with unaligned size.
- ASSERT_NE((kVectorSize - 1) % (vector_math::kRequiredAlignment /
- sizeof(float)), 0U);
- FillTestVectors(kInputFillValue, kOutputFillValue);
- start = TimeTicks::HighResNow();
- for (int j = 0; j < kBenchmarkIterations; ++j) {
- vector_math::FMUL_FUNC(
- input_vector.get(), kScale, kVectorSize - 1, output_vector.get());
- }
- double total_time_optimized_unaligned_ms =
- (TimeTicks::HighResNow() - start).InMillisecondsF();
- printf(STRINGIZE(FMUL_FUNC) " (unaligned size) took %.2fms; which is %.2fx "
- "faster than FMUL_C.\n", total_time_optimized_unaligned_ms,
- total_time_c_ms / total_time_optimized_unaligned_ms);
-
- // Benchmark FMUL_SSE() with aligned size.
- ASSERT_EQ(kVectorSize % (vector_math::kRequiredAlignment / sizeof(float)),
- 0U);
- FillTestVectors(kInputFillValue, kOutputFillValue);
- start = TimeTicks::HighResNow();
- for (int j = 0; j < kBenchmarkIterations; ++j) {
- vector_math::FMUL_FUNC(
- input_vector.get(), kScale, kVectorSize, output_vector.get());
- }
- double total_time_optimized_aligned_ms =
- (TimeTicks::HighResNow() - start).InMillisecondsF();
- printf(STRINGIZE(FMUL_FUNC) " (aligned) took %.2fms; which is %.2fx "
- "faster than FMUL_C and %.2fx faster than "
- STRINGIZE(FMUL_FUNC) " (unaligned).\n",
- total_time_optimized_aligned_ms,
- total_time_c_ms / total_time_optimized_aligned_ms,
- total_time_optimized_unaligned_ms / total_time_optimized_aligned_ms);
-#endif
+TEST_P(VectorMathEWMAAndMaxPowerTest, Correctness) {
+ GetParam().RunTest();
}
-#undef FMUL_FUNC
+static const float kZeros[] = { // 32 zeros
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static const float kOnes[] = { // 32 ones
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+};
+
+static const float kCheckerboard[] = { // 32 alternating 0, 1
+ 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1,
+ 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1
+};
+
+static const float kInverseCheckerboard[] = { // 32 alternating 1, 0
+ 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,
+ 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0
+};
+
+INSTANTIATE_TEST_CASE_P(
+ Scenarios, VectorMathEWMAAndMaxPowerTest,
+ ::testing::Values(
+ // Zero-length input: Result should equal initial value.
+ EWMATestScenario(0.0f, NULL, 0, 0.0f).HasExpectedResult(0.0f, 0.0f),
+ EWMATestScenario(1.0f, NULL, 0, 0.0f).HasExpectedResult(1.0f, 0.0f),
+
+ // Smoothing factor of zero: Samples have no effect on result.
+ EWMATestScenario(0.0f, kOnes, 32, 0.0f).HasExpectedResult(0.0f, 1.0f),
+ EWMATestScenario(1.0f, kZeros, 32, 0.0f).HasExpectedResult(1.0f, 0.0f),
+
+      // Smoothing factor of one: Result = last sample squared.
+ EWMATestScenario(0.0f, kCheckerboard, 32, 1.0f)
+ .ScaledBy(2.0f)
+ .HasExpectedResult(4.0f, 4.0f),
+ EWMATestScenario(1.0f, kInverseCheckerboard, 32, 1.0f)
+ .ScaledBy(2.0f)
+ .HasExpectedResult(0.0f, 4.0f),
+
+ // Smoothing factor of 1/4, muted signal.
+ EWMATestScenario(1.0f, kZeros, 1, 0.25f)
+ .HasExpectedResult(powf(0.75, 1.0f), 0.0f),
+ EWMATestScenario(1.0f, kZeros, 2, 0.25f)
+ .HasExpectedResult(powf(0.75, 2.0f), 0.0f),
+ EWMATestScenario(1.0f, kZeros, 3, 0.25f)
+ .HasExpectedResult(powf(0.75, 3.0f), 0.0f),
+ EWMATestScenario(1.0f, kZeros, 12, 0.25f)
+ .HasExpectedResult(powf(0.75, 12.0f), 0.0f),
+ EWMATestScenario(1.0f, kZeros, 13, 0.25f)
+ .HasExpectedResult(powf(0.75, 13.0f), 0.0f),
+ EWMATestScenario(1.0f, kZeros, 14, 0.25f)
+ .HasExpectedResult(powf(0.75, 14.0f), 0.0f),
+ EWMATestScenario(1.0f, kZeros, 15, 0.25f)
+ .HasExpectedResult(powf(0.75, 15.0f), 0.0f),
+
+ // Smoothing factor of 1/4, constant full-amplitude signal.
+ EWMATestScenario(0.0f, kOnes, 1, 0.25f).HasExpectedResult(0.25f, 1.0f),
+ EWMATestScenario(0.0f, kOnes, 2, 0.25f)
+ .HasExpectedResult(0.4375f, 1.0f),
+ EWMATestScenario(0.0f, kOnes, 3, 0.25f)
+ .HasExpectedResult(0.578125f, 1.0f),
+ EWMATestScenario(0.0f, kOnes, 12, 0.25f)
+ .HasExpectedResult(0.96832365f, 1.0f),
+ EWMATestScenario(0.0f, kOnes, 13, 0.25f)
+ .HasExpectedResult(0.97624274f, 1.0f),
+ EWMATestScenario(0.0f, kOnes, 14, 0.25f)
+ .HasExpectedResult(0.98218205f, 1.0f),
+ EWMATestScenario(0.0f, kOnes, 15, 0.25f)
+ .HasExpectedResult(0.98663654f, 1.0f),
+
+ // Smoothing factor of 1/4, checkerboard signal.
+ EWMATestScenario(0.0f, kCheckerboard, 1, 0.25f)
+ .HasExpectedResult(0.0f, 0.0f),
+ EWMATestScenario(0.0f, kCheckerboard, 2, 0.25f)
+ .HasExpectedResult(0.25f, 1.0f),
+ EWMATestScenario(0.0f, kCheckerboard, 3, 0.25f)
+ .HasExpectedResult(0.1875f, 1.0f),
+ EWMATestScenario(0.0f, kCheckerboard, 12, 0.25f)
+ .HasExpectedResult(0.55332780f, 1.0f),
+ EWMATestScenario(0.0f, kCheckerboard, 13, 0.25f)
+ .HasExpectedResult(0.41499585f, 1.0f),
+ EWMATestScenario(0.0f, kCheckerboard, 14, 0.25f)
+ .HasExpectedResult(0.56124689f, 1.0f),
+ EWMATestScenario(0.0f, kCheckerboard, 15, 0.25f)
+ .HasExpectedResult(0.42093517f, 1.0f),
+
+ // Smoothing factor of 1/4, inverse checkerboard signal.
+ EWMATestScenario(0.0f, kInverseCheckerboard, 1, 0.25f)
+ .HasExpectedResult(0.25f, 1.0f),
+ EWMATestScenario(0.0f, kInverseCheckerboard, 2, 0.25f)
+ .HasExpectedResult(0.1875f, 1.0f),
+ EWMATestScenario(0.0f, kInverseCheckerboard, 3, 0.25f)
+ .HasExpectedResult(0.390625f, 1.0f),
+ EWMATestScenario(0.0f, kInverseCheckerboard, 12, 0.25f)
+ .HasExpectedResult(0.41499585f, 1.0f),
+ EWMATestScenario(0.0f, kInverseCheckerboard, 13, 0.25f)
+ .HasExpectedResult(0.56124689f, 1.0f),
+ EWMATestScenario(0.0f, kInverseCheckerboard, 14, 0.25f)
+ .HasExpectedResult(0.42093517f, 1.0f),
+ EWMATestScenario(0.0f, kInverseCheckerboard, 15, 0.25f)
+ .HasExpectedResult(0.56570137f, 1.0f),
+
+      // Smoothing factor of 1/4, impulse signal.
+ EWMATestScenario(0.0f, kZeros, 3, 0.25f)
+ .WithImpulse(2.0f, 0)
+ .HasExpectedResult(0.562500f, 4.0f),
+ EWMATestScenario(0.0f, kZeros, 3, 0.25f)
+ .WithImpulse(2.0f, 1)
+ .HasExpectedResult(0.75f, 4.0f),
+ EWMATestScenario(0.0f, kZeros, 3, 0.25f)
+ .WithImpulse(2.0f, 2)
+ .HasExpectedResult(1.0f, 4.0f),
+ EWMATestScenario(0.0f, kZeros, 32, 0.25f)
+ .WithImpulse(2.0f, 0)
+ .HasExpectedResult(0.00013394f, 4.0f),
+ EWMATestScenario(0.0f, kZeros, 32, 0.25f)
+ .WithImpulse(2.0f, 1)
+ .HasExpectedResult(0.00017858f, 4.0f),
+ EWMATestScenario(0.0f, kZeros, 32, 0.25f)
+ .WithImpulse(2.0f, 2)
+ .HasExpectedResult(0.00023811f, 4.0f),
+ EWMATestScenario(0.0f, kZeros, 32, 0.25f)
+ .WithImpulse(2.0f, 3)
+ .HasExpectedResult(0.00031748f, 4.0f)
+ ));
} // namespace media
diff --git a/chromium/media/base/video_decoder_config.cc b/chromium/media/base/video_decoder_config.cc
index da914f22fb9..82d607526bb 100644
--- a/chromium/media/base/video_decoder_config.cc
+++ b/chromium/media/base/video_decoder_config.cc
@@ -12,7 +12,7 @@ namespace media {
VideoDecoderConfig::VideoDecoderConfig()
: codec_(kUnknownVideoCodec),
profile_(VIDEO_CODEC_PROFILE_UNKNOWN),
- format_(VideoFrame::INVALID),
+ format_(VideoFrame::UNKNOWN),
is_encrypted_(false) {
}
@@ -76,6 +76,8 @@ void VideoDecoderConfig::Initialize(VideoCodec codec,
UmaHistogramAspectRatio("Media.VideoCodedAspectRatio", coded_size);
UMA_HISTOGRAM_COUNTS_10000("Media.VideoVisibleWidth", visible_rect.width());
UmaHistogramAspectRatio("Media.VideoVisibleAspectRatio", visible_rect);
+ UMA_HISTOGRAM_ENUMERATION(
+ "Media.VideoPixelFormat", format, VideoFrame::HISTOGRAM_MAX);
}
codec_ = codec;
diff --git a/chromium/media/base/video_frame.cc b/chromium/media/base/video_frame.cc
index a372889cb55..8a4eb3cce2b 100644
--- a/chromium/media/base/video_frame.cc
+++ b/chromium/media/base/video_frame.cc
@@ -26,15 +26,13 @@ scoped_refptr<VideoFrame> VideoFrame::CreateFrame(
base::TimeDelta timestamp) {
DCHECK(IsValidConfig(format, coded_size, visible_rect, natural_size));
scoped_refptr<VideoFrame> frame(new VideoFrame(
- format, coded_size, visible_rect, natural_size, timestamp));
+ format, coded_size, visible_rect, natural_size, timestamp, false));
switch (format) {
- case VideoFrame::RGB32:
- frame->AllocateRGB(4u);
- break;
case VideoFrame::YV12:
case VideoFrame::YV12A:
case VideoFrame::YV16:
case VideoFrame::I420:
+ case VideoFrame::YV12J:
frame->AllocateYUV();
break;
default:
@@ -46,26 +44,26 @@ scoped_refptr<VideoFrame> VideoFrame::CreateFrame(
// static
std::string VideoFrame::FormatToString(VideoFrame::Format format) {
switch (format) {
- case VideoFrame::INVALID:
- return "INVALID";
- case VideoFrame::RGB32:
- return "RGB32";
+ case VideoFrame::UNKNOWN:
+ return "UNKNOWN";
case VideoFrame::YV12:
return "YV12";
case VideoFrame::YV16:
return "YV16";
- case VideoFrame::EMPTY:
- return "EMPTY";
case VideoFrame::I420:
return "I420";
case VideoFrame::NATIVE_TEXTURE:
return "NATIVE_TEXTURE";
-#if defined(GOOGLE_TV)
+#if defined(VIDEO_HOLE)
case VideoFrame::HOLE:
return "HOLE";
-#endif
+#endif // defined(VIDEO_HOLE)
case VideoFrame::YV12A:
return "YV12A";
+ case VideoFrame::YV12J:
+ return "YV12J";
+ case VideoFrame::HISTOGRAM_MAX:
+ return "HISTOGRAM_MAX";
}
NOTREACHED() << "Invalid videoframe format provided: " << format;
return "";
@@ -76,7 +74,7 @@ bool VideoFrame::IsValidConfig(VideoFrame::Format format,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
const gfx::Size& natural_size) {
- return (format != VideoFrame::INVALID &&
+ return (format != VideoFrame::UNKNOWN &&
!coded_size.IsEmpty() &&
coded_size.GetArea() <= limits::kMaxCanvas &&
coded_size.width() <= limits::kMaxDimension &&
@@ -93,7 +91,7 @@ bool VideoFrame::IsValidConfig(VideoFrame::Format format,
// static
scoped_refptr<VideoFrame> VideoFrame::WrapNativeTexture(
- const scoped_refptr<MailboxHolder>& mailbox_holder,
+ scoped_ptr<MailboxHolder> mailbox_holder,
uint32 texture_target,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
@@ -101,9 +99,13 @@ scoped_refptr<VideoFrame> VideoFrame::WrapNativeTexture(
base::TimeDelta timestamp,
const ReadPixelsCB& read_pixels_cb,
const base::Closure& no_longer_needed_cb) {
- scoped_refptr<VideoFrame> frame(new VideoFrame(
- NATIVE_TEXTURE, coded_size, visible_rect, natural_size, timestamp));
- frame->texture_mailbox_holder_ = mailbox_holder;
+ scoped_refptr<VideoFrame> frame(new VideoFrame(NATIVE_TEXTURE,
+ coded_size,
+ visible_rect,
+ natural_size,
+ timestamp,
+ false));
+ frame->texture_mailbox_holder_ = mailbox_holder.Pass();
frame->texture_target_ = texture_target;
frame->read_pixels_cb_ = read_pixels_cb;
frame->no_longer_needed_cb_ = no_longer_needed_cb;
@@ -118,7 +120,7 @@ void VideoFrame::ReadPixelsFromNativeTexture(const SkBitmap& pixels) {
}
// static
-scoped_refptr<VideoFrame> VideoFrame::WrapExternalSharedMemory(
+scoped_refptr<VideoFrame> VideoFrame::WrapExternalPackedMemory(
Format format,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
@@ -134,7 +136,7 @@ scoped_refptr<VideoFrame> VideoFrame::WrapExternalSharedMemory(
switch (format) {
case I420: {
scoped_refptr<VideoFrame> frame(new VideoFrame(
- format, coded_size, visible_rect, natural_size, timestamp));
+ format, coded_size, visible_rect, natural_size, timestamp, false));
frame->shared_memory_handle_ = handle;
frame->strides_[kYPlane] = coded_size.width();
frame->strides_[kUPlane] = coded_size.width() / 2;
@@ -167,7 +169,7 @@ scoped_refptr<VideoFrame> VideoFrame::WrapExternalYuvData(
const base::Closure& no_longer_needed_cb) {
DCHECK(format == YV12 || format == YV16 || format == I420) << format;
scoped_refptr<VideoFrame> frame(new VideoFrame(
- format, coded_size, visible_rect, natural_size, timestamp));
+ format, coded_size, visible_rect, natural_size, timestamp, false));
frame->strides_[kYPlane] = y_stride;
frame->strides_[kUPlane] = u_stride;
frame->strides_[kVPlane] = v_stride;
@@ -179,10 +181,30 @@ scoped_refptr<VideoFrame> VideoFrame::WrapExternalYuvData(
}
// static
-scoped_refptr<VideoFrame> VideoFrame::CreateEmptyFrame() {
- return new VideoFrame(
- VideoFrame::EMPTY, gfx::Size(), gfx::Rect(), gfx::Size(),
- base::TimeDelta());
+scoped_refptr<VideoFrame> VideoFrame::WrapVideoFrame(
+ const scoped_refptr<VideoFrame>& frame,
+ const base::Closure& no_longer_needed_cb) {
+ scoped_refptr<VideoFrame> wrapped_frame(new VideoFrame(
+ frame->format(), frame->coded_size(), frame->visible_rect(),
+ frame->natural_size(), frame->GetTimestamp(), frame->end_of_stream()));
+
+ for (size_t i = 0; i < NumPlanes(frame->format()); ++i) {
+ wrapped_frame->strides_[i] = frame->stride(i);
+ wrapped_frame->data_[i] = frame->data(i);
+ }
+
+ wrapped_frame->no_longer_needed_cb_ = no_longer_needed_cb;
+ return wrapped_frame;
+}
+
+// static
+scoped_refptr<VideoFrame> VideoFrame::CreateEOSFrame() {
+ return new VideoFrame(VideoFrame::UNKNOWN,
+ gfx::Size(),
+ gfx::Rect(),
+ gfx::Size(),
+ kNoTimestamp(),
+ true);
}
// static
@@ -205,8 +227,8 @@ scoped_refptr<VideoFrame> VideoFrame::CreateBlackFrame(const gfx::Size& size) {
return CreateColorFrame(size, kBlackY, kBlackUV, kBlackUV, kZero);
}
-#if defined(GOOGLE_TV)
-// This block and other blocks wrapped around #if defined(GOOGLE_TV) is not
+#if defined(VIDEO_HOLE)
+// This block and other blocks wrapped around #if defined(VIDEO_HOLE) is not
// maintained by the general compositor team. Please contact the following
// people instead:
//
@@ -218,29 +240,28 @@ scoped_refptr<VideoFrame> VideoFrame::CreateHoleFrame(
const gfx::Size& size) {
DCHECK(IsValidConfig(VideoFrame::HOLE, size, gfx::Rect(size), size));
scoped_refptr<VideoFrame> frame(new VideoFrame(
- VideoFrame::HOLE, size, gfx::Rect(size), size, base::TimeDelta()));
+ VideoFrame::HOLE, size, gfx::Rect(size), size, base::TimeDelta(), false));
return frame;
}
-#endif
+#endif // defined(VIDEO_HOLE)
// static
size_t VideoFrame::NumPlanes(Format format) {
switch (format) {
case VideoFrame::NATIVE_TEXTURE:
-#if defined(GOOGLE_TV)
+#if defined(VIDEO_HOLE)
case VideoFrame::HOLE:
-#endif
+#endif // defined(VIDEO_HOLE)
return 0;
- case VideoFrame::RGB32:
- return 1;
case VideoFrame::YV12:
case VideoFrame::YV16:
case VideoFrame::I420:
+ case VideoFrame::YV12J:
return 3;
case VideoFrame::YV12A:
return 4;
- case VideoFrame::EMPTY:
- case VideoFrame::INVALID:
+ case VideoFrame::UNKNOWN:
+ case VideoFrame::HISTOGRAM_MAX:
break;
}
NOTREACHED() << "Unsupported video frame format: " << format;
@@ -255,61 +276,78 @@ static inline size_t RoundUp(size_t value, size_t alignment) {
// static
size_t VideoFrame::AllocationSize(Format format, const gfx::Size& coded_size) {
+ size_t total = 0;
+ for (size_t i = 0; i < NumPlanes(format); ++i)
+ total += PlaneAllocationSize(format, i, coded_size);
+ return total;
+}
+
+// static
+size_t VideoFrame::PlaneAllocationSize(Format format,
+ size_t plane,
+ const gfx::Size& coded_size) {
+ const size_t area =
+ RoundUp(coded_size.width(), 2) * RoundUp(coded_size.height(), 2);
switch (format) {
- case VideoFrame::RGB32:
- return coded_size.GetArea() * 4;
case VideoFrame::YV12:
+ case VideoFrame::YV12J:
case VideoFrame::I420: {
- const size_t rounded_size =
- RoundUp(coded_size.width(), 2) * RoundUp(coded_size.height(), 2);
- return rounded_size * 3 / 2;
+ switch (plane) {
+ case VideoFrame::kYPlane:
+ return area;
+ case VideoFrame::kUPlane:
+ case VideoFrame::kVPlane:
+ return area / 4;
+ default:
+ break;
+ }
}
case VideoFrame::YV12A: {
- const size_t rounded_size =
- RoundUp(coded_size.width(), 2) * RoundUp(coded_size.height(), 2);
- return rounded_size * 5 / 2;
+ switch (plane) {
+ case VideoFrame::kYPlane:
+ case VideoFrame::kAPlane:
+ return area;
+ case VideoFrame::kUPlane:
+ case VideoFrame::kVPlane:
+ return area / 4;
+ default:
+ break;
+ }
}
case VideoFrame::YV16: {
- const size_t rounded_size =
- RoundUp(coded_size.width(), 2) * RoundUp(coded_size.height(), 2);
- return rounded_size * 2;
+ switch (plane) {
+ case VideoFrame::kYPlane:
+ return area;
+ case VideoFrame::kUPlane:
+ case VideoFrame::kVPlane:
+ return area / 2;
+ default:
+ break;
+ }
}
- case VideoFrame::INVALID:
- case VideoFrame::EMPTY:
+ case VideoFrame::UNKNOWN:
case VideoFrame::NATIVE_TEXTURE:
-#if defined(GOOGLE_TV)
+ case VideoFrame::HISTOGRAM_MAX:
+#if defined(VIDEO_HOLE)
case VideoFrame::HOLE:
-#endif
+#endif // defined(VIDEO_HOLE)
break;
}
- NOTREACHED() << "Unsupported video frame format: " << format;
+ NOTREACHED() << "Unsupported video frame format/plane: "
+ << format << "/" << plane;
return 0;
}
-// Release data allocated by AllocateRGB() or AllocateYUV().
+// Release data allocated by AllocateYUV().
static void ReleaseData(uint8* data) {
DCHECK(data);
base::AlignedFree(data);
}
-void VideoFrame::AllocateRGB(size_t bytes_per_pixel) {
- // Round up to align at least at a 16-byte boundary for each row.
- // This is sufficient for MMX and SSE2 reads (movq/movdqa).
- size_t bytes_per_row = RoundUp(coded_size_.width(),
- kFrameSizeAlignment) * bytes_per_pixel;
- size_t aligned_height = RoundUp(coded_size_.height(), kFrameSizeAlignment);
- strides_[VideoFrame::kRGBPlane] = bytes_per_row;
- data_[VideoFrame::kRGBPlane] = reinterpret_cast<uint8*>(
- base::AlignedAlloc(bytes_per_row * aligned_height + kFrameSizePadding,
- kFrameAddressAlignment));
- no_longer_needed_cb_ = base::Bind(&ReleaseData, data_[VideoFrame::kRGBPlane]);
- DCHECK(!(reinterpret_cast<intptr_t>(data_[VideoFrame::kRGBPlane]) & 7));
- COMPILE_ASSERT(0 == VideoFrame::kRGBPlane, RGB_data_must_be_index_0);
-}
-
void VideoFrame::AllocateYUV() {
DCHECK(format_ == VideoFrame::YV12 || format_ == VideoFrame::YV16 ||
- format_ == VideoFrame::YV12A || format_ == VideoFrame::I420);
+ format_ == VideoFrame::YV12A || format_ == VideoFrame::I420 ||
+ format_ == VideoFrame::YV12J);
// Align Y rows at least at 16 byte boundaries. The stride for both
// YV12 and YV16 is 1/2 of the stride of Y. For YV12, every row of bytes for
// U and V applies to two rows of Y (one byte of UV for 4 bytes of Y), so in
@@ -364,14 +402,16 @@ VideoFrame::VideoFrame(VideoFrame::Format format,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
const gfx::Size& natural_size,
- base::TimeDelta timestamp)
+ base::TimeDelta timestamp,
+ bool end_of_stream)
: format_(format),
coded_size_(coded_size),
visible_rect_(visible_rect),
natural_size_(natural_size),
texture_target_(0),
shared_memory_handle_(base::SharedMemory::NULLHandle()),
- timestamp_(timestamp) {
+ timestamp_(timestamp),
+ end_of_stream_(end_of_stream) {
memset(&strides_, 0, sizeof(strides_));
memset(&data_, 0, sizeof(data_));
}
@@ -394,10 +434,6 @@ int VideoFrame::row_bytes(size_t plane) const {
DCHECK(IsValidPlane(plane));
int width = coded_size_.width();
switch (format_) {
- // 32bpp.
- case RGB32:
- return width * 4;
-
// Planar, 8bpp.
case YV12A:
if (plane == kAPlane)
@@ -406,6 +442,7 @@ int VideoFrame::row_bytes(size_t plane) const {
case YV12:
case YV16:
case I420:
+ case YV12J:
if (plane == kYPlane)
return width;
return RoundUp(width, 2) / 2;
@@ -423,7 +460,6 @@ int VideoFrame::rows(size_t plane) const {
DCHECK(IsValidPlane(plane));
int height = coded_size_.height();
switch (format_) {
- case RGB32:
case YV16:
return height;
@@ -451,10 +487,9 @@ uint8* VideoFrame::data(size_t plane) const {
return data_[plane];
}
-const scoped_refptr<VideoFrame::MailboxHolder>& VideoFrame::texture_mailbox()
- const {
+VideoFrame::MailboxHolder* VideoFrame::texture_mailbox() const {
DCHECK_EQ(format_, NATIVE_TEXTURE);
- return texture_mailbox_holder_;
+ return texture_mailbox_holder_.get();
}
uint32 VideoFrame::texture_target() const {
@@ -466,10 +501,6 @@ base::SharedMemoryHandle VideoFrame::shared_memory_handle() const {
return shared_memory_handle_;
}
-bool VideoFrame::IsEndOfStream() const {
- return format_ == VideoFrame::EMPTY;
-}
-
void VideoFrame::HashFrameForTesting(base::MD5Context* context) {
for (int plane = 0; plane < kMaxPlanes; ++plane) {
if (!IsValidPlane(plane))
diff --git a/chromium/media/base/video_frame.h b/chromium/media/base/video_frame.h
index df383d0d798..b51bfe96d2d 100644
--- a/chromium/media/base/video_frame.h
+++ b/chromium/media/base/video_frame.h
@@ -28,8 +28,6 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
enum {
kMaxPlanes = 4,
- kRGBPlane = 0,
-
kYPlane = 0,
kUPlane = 1,
kVPlane = 2,
@@ -39,31 +37,30 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
// Surface formats roughly based on FOURCC labels, see:
// http://www.fourcc.org/rgb.php
// http://www.fourcc.org/yuv.php
+ // Logged to UMA, so never reuse values.
enum Format {
- INVALID = 0, // Invalid format value. Used for error reporting.
- RGB32 = 4, // 32bpp RGB packed with extra byte 8:8:8
- YV12 = 6, // 12bpp YVU planar 1x1 Y, 2x2 VU samples
- YV16 = 7, // 16bpp YVU planar 1x1 Y, 2x1 VU samples
- EMPTY = 9, // An empty frame.
- I420 = 11, // 12bpp YVU planar 1x1 Y, 2x2 UV samples.
- NATIVE_TEXTURE = 12, // Native texture. Pixel-format agnostic.
-#if defined(GOOGLE_TV)
- HOLE = 13, // Hole frame.
-#endif
- YV12A = 14, // 20bpp YUVA planar 1x1 Y, 2x2 VU, 1x1 A samples.
+ UNKNOWN = 0, // Unknown format value.
+ YV12 = 1, // 12bpp YVU planar 1x1 Y, 2x2 VU samples
+ YV16 = 2, // 16bpp YVU planar 1x1 Y, 2x1 VU samples
+ I420 = 3, // 12bpp YVU planar 1x1 Y, 2x2 UV samples.
+ YV12A = 4, // 20bpp YUVA planar 1x1 Y, 2x2 VU, 1x1 A samples.
+#if defined(VIDEO_HOLE)
+ HOLE = 5, // Hole frame.
+#endif // defined(VIDEO_HOLE)
+ NATIVE_TEXTURE = 6, // Native texture. Pixel-format agnostic.
+ YV12J = 7, // JPEG color range version of YV12
+ HISTOGRAM_MAX, // Must always be greatest.
};
// Returns the name of a Format as a string.
static std::string FormatToString(Format format);
- // This class calls the TextureNoLongerNeededCallback when the last reference
- // on the class is destroyed. The VideoFrame holds a reference to the mailbox
- // but anyone else who queries the mailbox should also hold a reference while
- // it is uses the mailbox, to ensure it remains valid. When finished with the
- // mailbox, call Return() with a new sync point, to ensure the mailbox remains
+ // This class calls the TextureNoLongerNeededCallback when this class is
+ // destroyed. Users can query the current sync point associated with this
+ // mailbox with sync_point(), and should call Resync() with a new sync point
+ // to ensure the mailbox remains valid for the issued commands.
// valid for the issued commands.
- class MEDIA_EXPORT MailboxHolder
- : public base::RefCountedThreadSafe<MailboxHolder> {
+ class MEDIA_EXPORT MailboxHolder {
public:
typedef base::Callback<void(uint32 sync_point)>
TextureNoLongerNeededCallback;
@@ -71,15 +68,14 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
MailboxHolder(const gpu::Mailbox& mailbox,
unsigned sync_point,
const TextureNoLongerNeededCallback& release_callback);
+ ~MailboxHolder();
const gpu::Mailbox& mailbox() const { return mailbox_; }
unsigned sync_point() const { return sync_point_; }
- void Return(unsigned sync_point) { sync_point_ = sync_point; }
+ void Resync(unsigned sync_point) { sync_point_ = sync_point; }
private:
- friend class base::RefCountedThreadSafe<MailboxHolder>;
- ~MailboxHolder();
gpu::Mailbox mailbox_;
unsigned sync_point_;
@@ -123,7 +119,7 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
// |read_pixels_cb| may be used to do (slow!) readbacks from the
// texture to main memory.
static scoped_refptr<VideoFrame> WrapNativeTexture(
- const scoped_refptr<MailboxHolder>& mailbox_holder,
+ scoped_ptr<MailboxHolder> mailbox_holder,
uint32 texture_target,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
@@ -137,12 +133,13 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
// least as large as 4*visible_rect().width()*visible_rect().height().
void ReadPixelsFromNativeTexture(const SkBitmap& pixels);
- // Wraps image data in a buffer backed by a base::SharedMemoryHandle with a
- // VideoFrame. The image data resides in |data| and is assumed to be packed
- // tightly in a buffer of logical dimensions |coded_size| with the appropriate
- // bit depth and plane count as given by |format|. When the frame is
- // destroyed |no_longer_needed_cb.Run()| will be called.
- static scoped_refptr<VideoFrame> WrapExternalSharedMemory(
+ // Wraps packed image data residing in a memory buffer with a VideoFrame.
+ // The image data resides in |data| and is assumed to be packed tightly in a
+ // buffer of logical dimensions |coded_size| with the appropriate bit depth
+ // and plane count as given by |format|. The shared memory handle of the
+ // backing allocation, if present, can be passed in with |handle|. When the
+ // frame is destroyed, |no_longer_needed_cb.Run()| will be called.
+ static scoped_refptr<VideoFrame> WrapExternalPackedMemory(
Format format,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
@@ -172,9 +169,14 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
base::TimeDelta timestamp,
const base::Closure& no_longer_needed_cb);
- // Creates a frame with format equals to VideoFrame::EMPTY, width, height,
- // and timestamp are all 0.
- static scoped_refptr<VideoFrame> CreateEmptyFrame();
+ // Wraps |frame| and calls |no_longer_needed_cb| when the wrapper VideoFrame
+ // gets destroyed.
+ static scoped_refptr<VideoFrame> WrapVideoFrame(
+ const scoped_refptr<VideoFrame>& frame,
+ const base::Closure& no_longer_needed_cb);
+
+ // Creates a frame which indicates end-of-stream.
+ static scoped_refptr<VideoFrame> CreateEOSFrame();
// Allocates YV12 frame based on |size|, and sets its data to the YUV(y,u,v).
static scoped_refptr<VideoFrame> CreateColorFrame(
@@ -186,10 +188,10 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
// equivalent of RGB(0,0,0).
static scoped_refptr<VideoFrame> CreateBlackFrame(const gfx::Size& size);
-#if defined(GOOGLE_TV)
+#if defined(VIDEO_HOLE)
// Allocates a hole frame.
static scoped_refptr<VideoFrame> CreateHoleFrame(const gfx::Size& size);
-#endif
+#endif // defined(VIDEO_HOLE)
static size_t NumPlanes(Format format);
@@ -197,6 +199,12 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
// given coded size and format.
static size_t AllocationSize(Format format, const gfx::Size& coded_size);
+ // Returns the required allocation size for a (tightly packed) plane of the
+ // given coded size and format.
+ static size_t PlaneAllocationSize(Format format,
+ size_t plane,
+ const gfx::Size& coded_size);
+
Format format() const { return format_; }
const gfx::Size& coded_size() const { return coded_size_; }
@@ -219,7 +227,7 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
// Returns the mailbox of the native texture wrapped by this frame. Only
// valid to call if this is a NATIVE_TEXTURE frame. Before using the
// mailbox, the caller must wait for the included sync point.
- const scoped_refptr<MailboxHolder>& texture_mailbox() const;
+ MailboxHolder* texture_mailbox() const;
// Returns the texture target. Only valid for NATIVE_TEXTURE frames.
uint32 texture_target() const;
@@ -228,7 +236,7 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
base::SharedMemoryHandle shared_memory_handle() const;
// Returns true if this VideoFrame represents the end of the stream.
- bool IsEndOfStream() const;
+ bool end_of_stream() const { return end_of_stream_; }
base::TimeDelta GetTimestamp() const {
return timestamp_;
@@ -248,11 +256,10 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
const gfx::Size& natural_size,
- base::TimeDelta timestamp);
+ base::TimeDelta timestamp,
+ bool end_of_stream);
virtual ~VideoFrame();
- // Used internally by CreateFrame().
- void AllocateRGB(size_t bytes_per_pixel);
void AllocateYUV();
// Used to DCHECK() plane parameters.
@@ -280,7 +287,7 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
uint8* data_[kMaxPlanes];
// Native texture mailbox, if this is a NATIVE_TEXTURE frame.
- scoped_refptr<MailboxHolder> texture_mailbox_holder_;
+ scoped_ptr<MailboxHolder> texture_mailbox_holder_;
uint32 texture_target_;
ReadPixelsCB read_pixels_cb_;
@@ -291,6 +298,8 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
base::TimeDelta timestamp_;
+ const bool end_of_stream_;
+
DISALLOW_IMPLICIT_CONSTRUCTORS(VideoFrame);
};
diff --git a/chromium/media/base/video_frame_pool.cc b/chromium/media/base/video_frame_pool.cc
new file mode 100644
index 00000000000..4c5a5e31e22
--- /dev/null
+++ b/chromium/media/base/video_frame_pool.cc
@@ -0,0 +1,128 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/video_frame_pool.h"
+
+#include <list>
+
+#include "base/bind.h"
+#include "base/memory/ref_counted.h"
+#include "base/synchronization/lock.h"
+
+namespace media {
+
+class VideoFramePool::PoolImpl
+ : public base::RefCountedThreadSafe<VideoFramePool::PoolImpl> {
+ public:
+ PoolImpl();
+
+ // Returns a frame from the pool that matches the specified
+ // parameters or creates a new frame if no suitable frame exists in
+ // the pool. The pool is drained if no matching frame is found.
+ scoped_refptr<VideoFrame> CreateFrame(VideoFrame::Format format,
+ const gfx::Size& coded_size,
+ const gfx::Rect& visible_rect,
+ const gfx::Size& natural_size,
+ base::TimeDelta timestamp);
+
+ // Shuts down the frame pool and releases all frames in |frames_|.
+ // Once this is called frames will no longer be inserted back into
+ // |frames_|.
+ void Shutdown();
+
+ size_t GetPoolSizeForTesting() const { return frames_.size(); }
+
+ private:
+ friend class base::RefCountedThreadSafe<VideoFramePool::PoolImpl>;
+ ~PoolImpl();
+
+ // Called when the frame wrapper gets destroyed.
+ // |frame| is the actual frame that was wrapped and is placed
+ // in |frames_| by this function so it can be reused.
+ void FrameReleased(const scoped_refptr<VideoFrame>& frame);
+
+ base::Lock lock_;
+ bool is_shutdown_;
+ std::list<scoped_refptr<VideoFrame> > frames_;
+
+ DISALLOW_COPY_AND_ASSIGN(PoolImpl);
+};
+
+VideoFramePool::PoolImpl::PoolImpl() : is_shutdown_(false) {}
+
+VideoFramePool::PoolImpl::~PoolImpl() {
+ DCHECK(is_shutdown_);
+}
+
+scoped_refptr<VideoFrame> VideoFramePool::PoolImpl::CreateFrame(
+ VideoFrame::Format format,
+ const gfx::Size& coded_size,
+ const gfx::Rect& visible_rect,
+ const gfx::Size& natural_size,
+ base::TimeDelta timestamp) {
+ base::AutoLock auto_lock(lock_);
+ DCHECK(!is_shutdown_);
+
+ scoped_refptr<VideoFrame> frame;
+
+ while (!frame && !frames_.empty()) {
+ scoped_refptr<VideoFrame> pool_frame = frames_.front();
+ frames_.pop_front();
+
+ if (pool_frame->format() == format &&
+ pool_frame->coded_size() == coded_size &&
+ pool_frame->visible_rect() == visible_rect &&
+ pool_frame->natural_size() == natural_size) {
+ frame = pool_frame;
+ frame->SetTimestamp(kNoTimestamp());
+ break;
+ }
+ }
+
+ if (!frame) {
+ frame = VideoFrame::CreateFrame(
+ format, coded_size, visible_rect, natural_size, kNoTimestamp());
+ }
+
+ return VideoFrame::WrapVideoFrame(
+ frame, base::Bind(&VideoFramePool::PoolImpl::FrameReleased, this, frame));
+}
+
+void VideoFramePool::PoolImpl::Shutdown() {
+ base::AutoLock auto_lock(lock_);
+ is_shutdown_ = true;
+ frames_.clear();
+}
+
+void VideoFramePool::PoolImpl::FrameReleased(
+ const scoped_refptr<VideoFrame>& frame) {
+ base::AutoLock auto_lock(lock_);
+ if (is_shutdown_)
+ return;
+
+ frames_.push_back(frame);
+}
+
+VideoFramePool::VideoFramePool() : pool_(new PoolImpl()) {
+}
+
+VideoFramePool::~VideoFramePool() {
+ pool_->Shutdown();
+}
+
+scoped_refptr<VideoFrame> VideoFramePool::CreateFrame(
+ VideoFrame::Format format,
+ const gfx::Size& coded_size,
+ const gfx::Rect& visible_rect,
+ const gfx::Size& natural_size,
+ base::TimeDelta timestamp) {
+ return pool_->CreateFrame(format, coded_size, visible_rect, natural_size,
+ timestamp);
+}
+
+size_t VideoFramePool::GetPoolSizeForTesting() const {
+ return pool_->GetPoolSizeForTesting();
+}
+
+} // namespace media
diff --git a/chromium/media/base/video_frame_pool.h b/chromium/media/base/video_frame_pool.h
new file mode 100644
index 00000000000..76b309196a4
--- /dev/null
+++ b/chromium/media/base/video_frame_pool.h
@@ -0,0 +1,50 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_VIDEO_FRAME_POOL_H_
+#define MEDIA_BASE_VIDEO_FRAME_POOL_H_
+
+#include "media/base/media_export.h"
+#include "media/base/video_frame.h"
+
+namespace media {
+
+// Simple VideoFrame pool used to avoid unnecessarily allocating and destroying
+// VideoFrame objects. The pool manages the memory for the VideoFrame
+// returned by CreateFrame(). When one of these VideoFrames is destroyed,
+// the memory is returned to the pool for use by a subsequent CreateFrame()
+// call. The memory in the pool is retained for the life of the
+// VideoFramePool object. If the parameters passed to CreateFrame() change
+// during the life of this object, then the memory used by frames with the old
+// parameter values will be purged from the pool.
+class MEDIA_EXPORT VideoFramePool {
+ public:
+ VideoFramePool();
+ ~VideoFramePool();
+
+ // Returns a frame from the pool that matches the specified
+ // parameters or creates a new frame if no suitable frame exists in
+ // the pool.
+ scoped_refptr<VideoFrame> CreateFrame(VideoFrame::Format format,
+ const gfx::Size& coded_size,
+ const gfx::Rect& visible_rect,
+ const gfx::Size& natural_size,
+ base::TimeDelta timestamp);
+
+protected:
+ friend class VideoFramePoolTest;
+
+ // Returns the number of frames in the pool for testing purposes.
+ size_t GetPoolSizeForTesting() const;
+
+ private:
+ class PoolImpl;
+ scoped_refptr<PoolImpl> pool_;
+
+ DISALLOW_COPY_AND_ASSIGN(VideoFramePool);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_VIDEO_FRAME_POOL_H_
diff --git a/chromium/media/base/video_frame_pool_unittest.cc b/chromium/media/base/video_frame_pool_unittest.cc
new file mode 100644
index 00000000000..707279f8b9b
--- /dev/null
+++ b/chromium/media/base/video_frame_pool_unittest.cc
@@ -0,0 +1,73 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/video_frame_pool.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace media {
+
+class VideoFramePoolTest : public ::testing::Test {
+ public:
+ VideoFramePoolTest() : pool_(new VideoFramePool()) {}
+
+ scoped_refptr<VideoFrame> CreateFrame(VideoFrame::Format format,
+ int timestamp_ms) {
+ gfx::Size coded_size(320,240);
+ gfx::Rect visible_rect(coded_size);
+ gfx::Size natural_size(coded_size);
+ return pool_->CreateFrame(
+ format, coded_size, visible_rect, natural_size,
+ base::TimeDelta::FromMilliseconds(timestamp_ms));
+ }
+
+ void CheckPoolSize(size_t size) const {
+ EXPECT_EQ(size, pool_->GetPoolSizeForTesting());
+ }
+
+ protected:
+ scoped_ptr<VideoFramePool> pool_;
+};
+
+TEST_F(VideoFramePoolTest, SimpleFrameReuse) {
+ scoped_refptr<VideoFrame> frame = CreateFrame(VideoFrame::YV12, 10);
+ const uint8* old_y_data = frame->data(VideoFrame::kYPlane);
+
+ // Clear frame reference to return the frame to the pool.
+ frame = NULL;
+
+ // Verify that the next frame from the pool uses the same memory.
+ scoped_refptr<VideoFrame> new_frame = CreateFrame(VideoFrame::YV12, 10);
+ EXPECT_EQ(old_y_data, new_frame->data(VideoFrame::kYPlane));
+}
+
+TEST_F(VideoFramePoolTest, SimpleFormatChange) {
+ scoped_refptr<VideoFrame> frame_a = CreateFrame(VideoFrame::YV12, 10);
+ scoped_refptr<VideoFrame> frame_b = CreateFrame(VideoFrame::YV12, 10);
+
+ // Clear frame references to return the frames to the pool.
+ frame_a = NULL;
+ frame_b = NULL;
+
+ // Verify that both frames are in the pool.
+ CheckPoolSize(2u);
+
+ // Verify that requesting a frame with a different format causes the pool
+ // to get drained.
+ scoped_refptr<VideoFrame> new_frame = CreateFrame(VideoFrame::YV12A, 10);
+ CheckPoolSize(0u);
+}
+
+TEST_F(VideoFramePoolTest, FrameValidAfterPoolDestruction) {
+ scoped_refptr<VideoFrame> frame = CreateFrame(VideoFrame::YV12, 10);
+
+ // Destroy the pool.
+ pool_.reset();
+
+ // Write to the Y plane. The memory tools should detect a
+ // use-after-free if the storage was actually removed by pool destruction.
+ memset(frame->data(VideoFrame::kYPlane), 0xff,
+ frame->rows(VideoFrame::kYPlane) * frame->stride(VideoFrame::kYPlane));
+}
+
+} // namespace media
diff --git a/chromium/media/base/video_frame_unittest.cc b/chromium/media/base/video_frame_unittest.cc
index b88d20c3639..9c7eab05995 100644
--- a/chromium/media/base/video_frame_unittest.cc
+++ b/chromium/media/base/video_frame_unittest.cc
@@ -7,6 +7,7 @@
#include "base/bind.h"
#include "base/callback_helpers.h"
#include "base/format_macros.h"
+#include "base/memory/aligned_memory.h"
#include "base/memory/scoped_ptr.h"
#include "base/strings/stringprintf.h"
#include "media/base/buffers.h"
@@ -46,40 +47,41 @@ void ExpectFrameColor(media::VideoFrame* yv12_frame, uint32 expect_rgb_color) {
ASSERT_EQ(VideoFrame::YV12, yv12_frame->format());
ASSERT_EQ(yv12_frame->stride(VideoFrame::kUPlane),
yv12_frame->stride(VideoFrame::kVPlane));
-
- scoped_refptr<media::VideoFrame> rgb_frame;
- rgb_frame = media::VideoFrame::CreateFrame(VideoFrame::RGB32,
- yv12_frame->coded_size(),
- yv12_frame->visible_rect(),
- yv12_frame->natural_size(),
- yv12_frame->GetTimestamp());
-
- ASSERT_EQ(yv12_frame->coded_size().width(),
- rgb_frame->coded_size().width());
- ASSERT_EQ(yv12_frame->coded_size().height(),
- rgb_frame->coded_size().height());
+ ASSERT_EQ(
+ yv12_frame->coded_size().width() & (VideoFrame::kFrameSizeAlignment - 1),
+ 0);
+ ASSERT_EQ(
+ yv12_frame->coded_size().height() & (VideoFrame::kFrameSizeAlignment - 1),
+ 0);
+
+ size_t bytes_per_row = yv12_frame->coded_size().width() * 4u;
+ uint8* rgb_data = reinterpret_cast<uint8*>(
+ base::AlignedAlloc(bytes_per_row * yv12_frame->coded_size().height() +
+ VideoFrame::kFrameSizePadding,
+ VideoFrame::kFrameAddressAlignment));
media::ConvertYUVToRGB32(yv12_frame->data(VideoFrame::kYPlane),
yv12_frame->data(VideoFrame::kUPlane),
yv12_frame->data(VideoFrame::kVPlane),
- rgb_frame->data(VideoFrame::kRGBPlane),
- rgb_frame->coded_size().width(),
- rgb_frame->coded_size().height(),
+ rgb_data,
+ yv12_frame->coded_size().width(),
+ yv12_frame->coded_size().height(),
yv12_frame->stride(VideoFrame::kYPlane),
yv12_frame->stride(VideoFrame::kUPlane),
- rgb_frame->stride(VideoFrame::kRGBPlane),
+ bytes_per_row,
media::YV12);
- for (int row = 0; row < rgb_frame->coded_size().height(); ++row) {
+ for (int row = 0; row < yv12_frame->coded_size().height(); ++row) {
uint32* rgb_row_data = reinterpret_cast<uint32*>(
- rgb_frame->data(VideoFrame::kRGBPlane) +
- (rgb_frame->stride(VideoFrame::kRGBPlane) * row));
- for (int col = 0; col < rgb_frame->coded_size().width(); ++col) {
+ rgb_data + (bytes_per_row * row));
+ for (int col = 0; col < yv12_frame->coded_size().width(); ++col) {
SCOPED_TRACE(
base::StringPrintf("Checking (%d, %d)", row, col));
EXPECT_EQ(expect_rgb_color, rgb_row_data[col]);
}
}
+
+ base::AlignedFree(rgb_data);
}
// Fill each plane to its reported extents and verify accessors report non
@@ -157,8 +159,8 @@ TEST(VideoFrame, CreateFrame) {
EXPECT_EQ(MD5DigestToBase16(digest), "911991d51438ad2e1a40ed5f6fc7c796");
// Test an empty frame.
- frame = VideoFrame::CreateEmptyFrame();
- EXPECT_TRUE(frame->IsEndOfStream());
+ frame = VideoFrame::CreateEOSFrame();
+ EXPECT_TRUE(frame->end_of_stream());
}
TEST(VideoFrame, CreateBlackFrame) {
@@ -173,7 +175,7 @@ TEST(VideoFrame, CreateBlackFrame) {
// Test basic properties.
EXPECT_EQ(0, frame->GetTimestamp().InMicroseconds());
- EXPECT_FALSE(frame->IsEndOfStream());
+ EXPECT_FALSE(frame->end_of_stream());
// Test |frame| properties.
EXPECT_EQ(VideoFrame::YV12, frame->format());
@@ -204,8 +206,6 @@ TEST(VideoFrame, CheckFrameExtents) {
// and the expected hash of all planes if filled with kFillByte (defined in
// ExpectFrameExtents).
ExpectFrameExtents(
- VideoFrame::RGB32, 1, 4, "de6d3d567e282f6a38d478f04fc81fb0");
- ExpectFrameExtents(
VideoFrame::YV12, 3, 1, "71113bdfd4c0de6cf62f48fb74f7a0b1");
ExpectFrameExtents(
VideoFrame::YV16, 3, 1, "9bb99ac3ff350644ebff4d28dc01b461");
@@ -223,17 +223,17 @@ TEST(VideoFrame, TextureNoLongerNeededCallbackIsCalled) {
{
scoped_refptr<VideoFrame> frame = VideoFrame::WrapNativeTexture(
- new VideoFrame::MailboxHolder(
+ make_scoped_ptr(new VideoFrame::MailboxHolder(
gpu::Mailbox(),
sync_point,
- base::Bind(&TextureCallback, &called_sync_point)),
- 5, // texture_target
- gfx::Size(10, 10), // coded_size
- gfx::Rect(10, 10), // visible_rect
- gfx::Size(10, 10), // natural_size
- base::TimeDelta(), // timestamp
+ base::Bind(&TextureCallback, &called_sync_point))),
+ 5, // texture_target
+ gfx::Size(10, 10), // coded_size
+ gfx::Rect(10, 10), // visible_rect
+ gfx::Size(10, 10), // natural_size
+ base::TimeDelta(), // timestamp
base::Callback<void(const SkBitmap&)>(), // read_pixels_cb
- base::Closure()); // no_longer_needed_cb
+ base::Closure()); // no_longer_needed_cb
EXPECT_EQ(0u, called_sync_point);
}
@@ -241,7 +241,7 @@ TEST(VideoFrame, TextureNoLongerNeededCallbackIsCalled) {
}
// Verify the TextureNoLongerNeededCallback is called when VideoFrame is
-// destroyed with the new sync point, when the mailbox is taken by a caller.
+// destroyed with the new sync point, when the mailbox is accessed by a caller.
TEST(VideoFrame, TextureNoLongerNeededCallbackAfterTakingAndReleasingMailbox) {
uint32 called_sync_point = 0;
@@ -252,173 +252,28 @@ TEST(VideoFrame, TextureNoLongerNeededCallbackAfterTakingAndReleasingMailbox) {
{
scoped_refptr<VideoFrame> frame = VideoFrame::WrapNativeTexture(
- new VideoFrame::MailboxHolder(
+ make_scoped_ptr(new VideoFrame::MailboxHolder(
mailbox,
sync_point,
- base::Bind(&TextureCallback, &called_sync_point)),
+ base::Bind(&TextureCallback, &called_sync_point))),
target,
- gfx::Size(10, 10), // coded_size
- gfx::Rect(10, 10), // visible_rect
- gfx::Size(10, 10), // natural_size
- base::TimeDelta(), // timestamp
+ gfx::Size(10, 10), // coded_size
+ gfx::Rect(10, 10), // visible_rect
+ gfx::Size(10, 10), // natural_size
+ base::TimeDelta(), // timestamp
base::Callback<void(const SkBitmap&)>(), // read_pixels_cb
- base::Closure()); // no_longer_needed_cb
-
- {
- scoped_refptr<VideoFrame::MailboxHolder> mailbox_holder =
- frame->texture_mailbox();
-
- EXPECT_EQ(mailbox.name[0], mailbox_holder->mailbox().name[0]);
- EXPECT_EQ(sync_point, mailbox_holder->sync_point());
- EXPECT_EQ(target, frame->texture_target());
-
- // Misuse the callback.
- sync_point = 12;
- mailbox_holder->Return(sync_point);
- EXPECT_EQ(0u, called_sync_point);
-
- // Finish using the mailbox_holder and drop our reference.
- sync_point = 10;
- mailbox_holder->Return(sync_point);
- }
- EXPECT_EQ(0u, called_sync_point);
- }
- EXPECT_EQ(sync_point, called_sync_point);
-}
-
-// If a caller has taken ownership of the texture mailbox, it should
-// not be released when the VideoFrame is destroyed, but should when
-// the TextureNoLongerNeededCallback is called.
-TEST(VideoFrame,
- TextureNoLongerNeededCallbackAfterTakingMailboxWithDestroyedFrame) {
- uint32 called_sync_point = 0;
-
- gpu::Mailbox mailbox;
- mailbox.name[0] = 50;
- uint32 sync_point = 7;
- uint32 target = 9;
-
- {
- scoped_refptr<VideoFrame::MailboxHolder> mailbox_holder;
-
- {
- scoped_refptr<VideoFrame> frame = VideoFrame::WrapNativeTexture(
- new VideoFrame::MailboxHolder(
- mailbox,
- sync_point,
- base::Bind(&TextureCallback, &called_sync_point)),
- target,
- gfx::Size(10, 10), // coded_size
- gfx::Rect(10, 10), // visible_rect
- gfx::Size(10, 10), // natural_size
- base::TimeDelta(), // timestamp
- base::Callback<void(const SkBitmap&)>(), // read_pixels_cb
- base::Closure()); // no_longer_needed_cb
-
- mailbox_holder = frame->texture_mailbox();
-
- EXPECT_EQ(mailbox.name[0], mailbox_holder->mailbox().name[0]);
- EXPECT_EQ(sync_point, mailbox_holder->sync_point());
- EXPECT_EQ(target, frame->texture_target());
-
- // Keep a ref on the mailbox_holder after the VideoFrame is dropped.
- }
- EXPECT_EQ(0u, called_sync_point);
+ base::Closure()); // no_longer_needed_cb
- // Misuse the callback.
- sync_point = 12;
- mailbox_holder->Return(sync_point);
- EXPECT_EQ(0u, called_sync_point);
-
- // Finish using the mailbox_holder and drop our ref.
- sync_point = 10;
- mailbox_holder->Return(sync_point);
- }
- EXPECT_EQ(sync_point, called_sync_point);
-}
-
-// If a caller has taken ownership of the texture mailbox, but does
-// not call the callback, it should still happen with the original
-// sync point.
-TEST(VideoFrame,
- TextureNoLongerNeededCallbackWhenNotCallingAndFrameDestroyed) {
- uint32 called_sync_point = 0;
-
- gpu::Mailbox mailbox;
- mailbox.name[0] = 50;
- uint32 sync_point = 7;
- uint32 target = 9;
-
- {
- scoped_refptr<VideoFrame::MailboxHolder> mailbox_holder;
-
- {
- scoped_refptr<VideoFrame> frame = VideoFrame::WrapNativeTexture(
- new VideoFrame::MailboxHolder(
- mailbox,
- sync_point,
- base::Bind(&TextureCallback, &called_sync_point)),
- target,
- gfx::Size(10, 10), // coded_size
- gfx::Rect(10, 10), // visible_rect
- gfx::Size(10, 10), // natural_size
- base::TimeDelta(), // timestamp
- base::Callback<void(const SkBitmap&)>(), // read_pixels_cb
- base::Closure()); // no_longer_needed_cb
-
- mailbox_holder = frame->texture_mailbox();
-
- EXPECT_EQ(mailbox.name[0], mailbox_holder->mailbox().name[0]);
- EXPECT_EQ(sync_point, mailbox_holder->sync_point());
- EXPECT_EQ(target, frame->texture_target());
-
- // Destroy the video frame.
- }
- EXPECT_EQ(0u, called_sync_point);
-
- // Drop the reference on the mailbox without using it at all.
- }
- EXPECT_EQ(sync_point, called_sync_point);
-}
-
-// If a caller has taken ownership of the texture mailbox, but does
-// not call the callback, it should still happen with the original
-// sync point.
-TEST(VideoFrame,
- TextureNoLongerNeededCallbackAfterTakingMailboxAndNotCalling) {
- uint32 called_sync_point = 0;
-
- gpu::Mailbox mailbox;
- mailbox.name[0] = 50;
- uint32 sync_point = 7;
- uint32 target = 9;
-
- {
- scoped_refptr<VideoFrame> frame = VideoFrame::WrapNativeTexture(
- new VideoFrame::MailboxHolder(
- mailbox,
- sync_point,
- base::Bind(&TextureCallback, &called_sync_point)),
- target,
- gfx::Size(10, 10), // coded_size
- gfx::Rect(10, 10), // visible_rect
- gfx::Size(10, 10), // natural_size
- base::TimeDelta(), // timestamp
- base::Callback<void(const SkBitmap&)>(), // read_pixels_cb
- base::Closure()); // no_longer_needed_cb
-
- scoped_refptr<VideoFrame::MailboxHolder> mailbox_holder =
- frame->texture_mailbox();
+ VideoFrame::MailboxHolder* mailbox_holder = frame->texture_mailbox();
EXPECT_EQ(mailbox.name[0], mailbox_holder->mailbox().name[0]);
EXPECT_EQ(sync_point, mailbox_holder->sync_point());
EXPECT_EQ(target, frame->texture_target());
- EXPECT_EQ(0u, called_sync_point);
-
- // Don't use the mailbox at all and drop our ref on it.
+ // Finish using the mailbox_holder and drop our reference.
+ sync_point = 10;
+ mailbox_holder->Resync(sync_point);
}
- // The VideoFrame is destroyed, it should call the callback.
EXPECT_EQ(sync_point, called_sync_point);
}
diff --git a/chromium/media/base/video_renderer.h b/chromium/media/base/video_renderer.h
index 84ce6cfa017..2650221e3d8 100644
--- a/chromium/media/base/video_renderer.h
+++ b/chromium/media/base/video_renderer.h
@@ -74,10 +74,13 @@ class MEDIA_EXPORT VideoRenderer {
// Discard any video data, executing |callback| when completed.
virtual void Flush(const base::Closure& callback) = 0;
- // Start prerolling video data for samples starting at |time|, executing
- // |callback| when completed.
+ // Start prerolling video data. If |time| equals kNoTimestamp() then all
+ // samples delivered to the renderer are used to complete preroll. If |time|
+ // does not equal kNoTimestamp(), then any samples delivered to the renderer
+ // with timestamps less than |time| are silently dropped and not used to
+ // satisfy preroll. |callback| is executed when preroll has completed.
//
- // Only valid to call after a successful Initialize() or Flush().
+ // Only valid to call after a successful Initialize(), Pause(), or Flush().
virtual void Preroll(base::TimeDelta time,
const PipelineStatusCB& callback) = 0;
diff --git a/chromium/media/base/video_util.cc b/chromium/media/base/video_util.cc
index fda758efecb..09f37b87169 100644
--- a/chromium/media/base/video_util.cc
+++ b/chromium/media/base/video_util.cc
@@ -144,6 +144,7 @@ void LetterboxYUV(VideoFrame* frame, const gfx::Rect& view_area) {
DCHECK(!(view_area.width() & 1));
DCHECK(!(view_area.height() & 1));
DCHECK(frame->format() == VideoFrame::YV12 ||
+ frame->format() == VideoFrame::YV12J ||
frame->format() == VideoFrame::I420);
LetterboxPlane(frame, VideoFrame::kYPlane, view_area, 0x00);
gfx::Rect half_view_area(view_area.x() / 2,
diff --git a/chromium/media/base/yuv_convert_unittest.cc b/chromium/media/base/yuv_convert_unittest.cc
index 21a82f1ea25..7c964f3771a 100644
--- a/chromium/media/base/yuv_convert_unittest.cc
+++ b/chromium/media/base/yuv_convert_unittest.cc
@@ -51,11 +51,11 @@ static void ReadData(const base::FilePath::CharType* filename,
// Verify file size is correct.
int64 actual_size = 0;
- file_util::GetFileSize(path, &actual_size);
+ base::GetFileSize(path, &actual_size);
CHECK_EQ(actual_size, expected_size);
// Verify bytes read are correct.
- int bytes_read = file_util::ReadFile(
+ int bytes_read = base::ReadFile(
path, reinterpret_cast<char*>(data->get()), expected_size);
CHECK_EQ(bytes_read, expected_size);
}
@@ -371,9 +371,9 @@ TEST(YUVConvertTest, RGB32ToYUV) {
.Append(FILE_PATH_LITERAL("data"))
.Append(FILE_PATH_LITERAL("bali_640x360_P420.yuv"));
EXPECT_EQ(static_cast<int>(kYUV12Size),
- file_util::ReadFile(yuv_url,
- reinterpret_cast<char*>(yuv_bytes.get()),
- static_cast<int>(kYUV12Size)));
+ base::ReadFile(yuv_url,
+ reinterpret_cast<char*>(yuv_bytes.get()),
+ static_cast<int>(kYUV12Size)));
// Convert a frame of YUV to 32 bit ARGB.
media::ConvertYUVToRGB32(yuv_bytes.get(),
@@ -451,9 +451,9 @@ TEST(YUVConvertTest, DownScaleYUVToRGB32WithRect) {
const size_t size_of_yuv = kSourceYSize * 12 / 8; // 12 bpp.
scoped_ptr<uint8[]> yuv_bytes(new uint8[size_of_yuv]);
EXPECT_EQ(static_cast<int>(size_of_yuv),
- file_util::ReadFile(yuv_url,
- reinterpret_cast<char*>(yuv_bytes.get()),
- static_cast<int>(size_of_yuv)));
+ base::ReadFile(yuv_url,
+ reinterpret_cast<char*>(yuv_bytes.get()),
+ static_cast<int>(size_of_yuv)));
// Scale the full frame of YUV to 32 bit ARGB.
// The API currently only supports down-scaling, so we don't test up-scaling.
diff --git a/chromium/media/cast/DEPS b/chromium/media/cast/DEPS
index 8e10c67d316..f84b3fbbf5e 100644
--- a/chromium/media/cast/DEPS
+++ b/chromium/media/cast/DEPS
@@ -1,4 +1,8 @@
include_rules = [
- "+net",
+ "+crypto",
+ "+media",
+ "+net",
"+third_party/webrtc",
+ "+third_party/libyuv",
+ "+ui/gfx",
]
diff --git a/chromium/media/cast/OWNERS b/chromium/media/cast/OWNERS
index 22e814b0a70..49f41be49c0 100644
--- a/chromium/media/cast/OWNERS
+++ b/chromium/media/cast/OWNERS
@@ -1,2 +1,4 @@
hclam@chromium.org
hubbe@chromium.org
+mikhal@chromium.org
+pwestin@google.com
diff --git a/chromium/media/cast/README b/chromium/media/cast/README
index 4878967fd5c..eca4cf6a1f9 100644
--- a/chromium/media/cast/README
+++ b/chromium/media/cast/README
@@ -16,7 +16,7 @@ cast/audio_sender/
cast/congestion_control/
Bandwidth estimation and network congestion handling.
-cast/pacing/
+cast/net/pacing/
Module for rate limiting data outflow.
cast/rtcp/
@@ -28,7 +28,7 @@ cast/rtp_common/
cast/rtp_receiver/
Module for reciving RTP messages.
-cast/rtp_sender/
+cast/net/rtp_sender/
Module for sending RTP messages.
cast/test/
diff --git a/chromium/media/cast/audio_receiver/audio_decoder.cc b/chromium/media/cast/audio_receiver/audio_decoder.cc
index 266c04ea20d..a761a5a84de 100644
--- a/chromium/media/cast/audio_receiver/audio_decoder.cc
+++ b/chromium/media/cast/audio_receiver/audio_decoder.cc
@@ -11,11 +11,16 @@
namespace media {
namespace cast {
-AudioDecoder::AudioDecoder(scoped_refptr<CastThread> cast_thread,
- const AudioReceiverConfig& audio_config)
- : cast_thread_(cast_thread),
- have_received_packets_(false) {
- audio_decoder_ = webrtc::AudioCodingModule::Create(0);
+AudioDecoder::AudioDecoder(scoped_refptr<CastEnvironment> cast_environment,
+ const AudioReceiverConfig& audio_config,
+ RtpPayloadFeedback* incoming_payload_feedback)
+ : cast_environment_(cast_environment),
+ audio_decoder_(webrtc::AudioCodingModule::Create(0)),
+ cast_message_builder_(cast_environment->Clock(),
+ incoming_payload_feedback, &frame_id_map_, audio_config.incoming_ssrc,
+ true, 0),
+ have_received_packets_(false),
+ last_played_out_timestamp_(0) {
audio_decoder_->InitializeReceiver();
webrtc::CodecInst receive_codec;
@@ -37,26 +42,35 @@ AudioDecoder::AudioDecoder(scoped_refptr<CastThread> cast_thread,
receive_codec.rate = -1;
break;
case kExternalAudio:
- DCHECK(false) << "Codec must be specified for audio decoder";
+ NOTREACHED() << "Codec must be specified for audio decoder";
break;
}
if (audio_decoder_->RegisterReceiveCodec(receive_codec) != 0) {
- DCHECK(false) << "Failed to register receive codec";
+ NOTREACHED() << "Failed to register receive codec";
}
audio_decoder_->SetMaximumPlayoutDelay(audio_config.rtp_max_delay_ms);
audio_decoder_->SetPlayoutMode(webrtc::streaming);
}
-AudioDecoder::~AudioDecoder() {
- webrtc::AudioCodingModule::Destroy(audio_decoder_);
-}
+AudioDecoder::~AudioDecoder() {}
bool AudioDecoder::GetRawAudioFrame(int number_of_10ms_blocks,
int desired_frequency,
PcmAudioFrame* audio_frame,
uint32* rtp_timestamp) {
- if (!have_received_packets_) return false;
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::AUDIO_DECODER));
+ // We don't care about the race case where a packet arrives at the same time
+ // as this function in called. The data will be there the next time this
+ // function is called.
+ lock_.Acquire();
+ // Get a local copy under lock.
+ bool have_received_packets = have_received_packets_;
+ lock_.Release();
+
+ if (!have_received_packets) return false;
+
+ audio_frame->samples.clear();
for (int i = 0; i < number_of_10ms_blocks; ++i) {
webrtc::AudioFrame webrtc_audio_frame;
@@ -77,6 +91,9 @@ bool AudioDecoder::GetRawAudioFrame(int number_of_10ms_blocks,
if (0 != audio_decoder_->PlayoutTimestamp(rtp_timestamp)) {
return false;
}
+ lock_.Acquire();
+ last_played_out_timestamp_ = *rtp_timestamp;
+ lock_.Release();
}
int samples_per_10ms = webrtc_audio_frame.samples_per_channel_;
@@ -89,11 +106,56 @@ bool AudioDecoder::GetRawAudioFrame(int number_of_10ms_blocks,
}
void AudioDecoder::IncomingParsedRtpPacket(const uint8* payload_data,
- int payload_size,
+ size_t payload_size,
const RtpCastHeader& rtp_header) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ DCHECK_LE(payload_size, kIpPacketSize);
+ audio_decoder_->IncomingPacket(payload_data, static_cast<int32>(payload_size),
+ rtp_header.webrtc);
+ lock_.Acquire();
have_received_packets_ = true;
- audio_decoder_->IncomingPacket(payload_data, payload_size, rtp_header.webrtc);
+ uint32 last_played_out_timestamp = last_played_out_timestamp_;
+ lock_.Release();
+
+ bool complete = false;
+ if (!frame_id_map_.InsertPacket(rtp_header, &complete)) return;
+ if (!complete) return;
+
+ cast_message_builder_.CompleteFrameReceived(rtp_header.frame_id,
+ rtp_header.is_key_frame);
+
+ frame_id_rtp_timestamp_map_[rtp_header.frame_id] =
+ rtp_header.webrtc.header.timestamp;
+
+ if (last_played_out_timestamp == 0) return; // Nothing is played out yet.
+
+ uint32 latest_frame_id_to_remove = 0;
+ bool frame_to_remove = false;
+
+ FrameIdRtpTimestampMap::iterator it = frame_id_rtp_timestamp_map_.begin();
+ while (it != frame_id_rtp_timestamp_map_.end()) {
+ if (IsNewerRtpTimestamp(it->second, last_played_out_timestamp)) {
+ break;
+ }
+ frame_to_remove = true;
+ latest_frame_id_to_remove = it->first;
+ frame_id_rtp_timestamp_map_.erase(it);
+ it = frame_id_rtp_timestamp_map_.begin();
+ }
+ if (!frame_to_remove) return;
+
+ frame_id_map_.RemoveOldFrames(latest_frame_id_to_remove);
+}
+
+bool AudioDecoder::TimeToSendNextCastMessage(base::TimeTicks* time_to_send) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ return cast_message_builder_.TimeToSendNextCastMessage(time_to_send);
+}
+
+void AudioDecoder::SendCastMessage() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ cast_message_builder_.UpdateCastMessage();
}
} // namespace cast
-} // namespace media \ No newline at end of file
+} // namespace media
diff --git a/chromium/media/cast/audio_receiver/audio_decoder.h b/chromium/media/cast/audio_receiver/audio_decoder.h
index 2f5f13aea2d..8a77d79d070 100644
--- a/chromium/media/cast/audio_receiver/audio_decoder.h
+++ b/chromium/media/cast/audio_receiver/audio_decoder.h
@@ -6,10 +6,12 @@
#define MEDIA_CAST_AUDIO_RECEIVER_AUDIO_DECODER_H_
#include "base/callback.h"
-#include "base/memory/ref_counted.h"
+#include "base/synchronization/lock.h"
#include "media/cast/cast_config.h"
-#include "media/cast/cast_thread.h"
-#include "media/cast/rtp_common/rtp_defines.h"
+#include "media/cast/cast_environment.h"
+#include "media/cast/framer/cast_message_builder.h"
+#include "media/cast/framer/frame_id_map.h"
+#include "media/cast/rtp_receiver/rtp_receiver_defines.h"
namespace webrtc {
class AudioCodingModule;
@@ -18,32 +20,47 @@ class AudioCodingModule;
namespace media {
namespace cast {
+typedef std::map<uint32, uint32> FrameIdRtpTimestampMap;
+
// Thread safe class.
-// It should be called from the main cast thread; however that is not required.
-class AudioDecoder : public base::RefCountedThreadSafe<AudioDecoder> {
+class AudioDecoder {
public:
- explicit AudioDecoder(scoped_refptr<CastThread> cast_thread,
- const AudioReceiverConfig& audio_config);
-
+ AudioDecoder(scoped_refptr<CastEnvironment> cast_environment,
+ const AudioReceiverConfig& audio_config,
+ RtpPayloadFeedback* incoming_payload_feedback);
virtual ~AudioDecoder();
// Extract a raw audio frame from the decoder.
// Set the number of desired 10ms blocks and frequency.
+ // Should be called from the cast audio decoder thread; however that is not
+ // required.
bool GetRawAudioFrame(int number_of_10ms_blocks,
int desired_frequency,
PcmAudioFrame* audio_frame,
uint32* rtp_timestamp);
// Insert an RTP packet to the decoder.
+ // Should be called from the main cast thread; however that is not required.
void IncomingParsedRtpPacket(const uint8* payload_data,
- int payload_size,
+ size_t payload_size,
const RtpCastHeader& rtp_header);
+ bool TimeToSendNextCastMessage(base::TimeTicks* time_to_send);
+ void SendCastMessage();
+
private:
- // Can't use scoped_ptr due to protected constructor within webrtc.
- webrtc::AudioCodingModule* audio_decoder_;
+ scoped_refptr<CastEnvironment> cast_environment_;
+
+ // The webrtc AudioCodingModule is threadsafe.
+ scoped_ptr<webrtc::AudioCodingModule> audio_decoder_;
+
+ FrameIdMap frame_id_map_;
+ CastMessageBuilder cast_message_builder_;
+
+ base::Lock lock_;
bool have_received_packets_;
- scoped_refptr<CastThread> cast_thread_;
+ FrameIdRtpTimestampMap frame_id_rtp_timestamp_map_;
+ uint32 last_played_out_timestamp_;
DISALLOW_COPY_AND_ASSIGN(AudioDecoder);
};
@@ -51,4 +68,4 @@ class AudioDecoder : public base::RefCountedThreadSafe<AudioDecoder> {
} // namespace cast
} // namespace media
-#endif // MEDIA_CAST_AUDIO_RECEIVER_AUDIO_DECODER_H_ \ No newline at end of file
+#endif // MEDIA_CAST_AUDIO_RECEIVER_AUDIO_DECODER_H_
diff --git a/chromium/media/cast/audio_receiver/audio_decoder_unittest.cc b/chromium/media/cast/audio_receiver/audio_decoder_unittest.cc
index dbe3e324df0..04df4728bd9 100644
--- a/chromium/media/cast/audio_receiver/audio_decoder_unittest.cc
+++ b/chromium/media/cast/audio_receiver/audio_decoder_unittest.cc
@@ -2,38 +2,49 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "base/bind.h"
-#include "base/memory/ref_counted.h"
-#include "base/memory/scoped_ptr.h"
+#include "base/test/simple_test_tick_clock.h"
#include "media/cast/audio_receiver/audio_decoder.h"
-#include "media/cast/cast_thread.h"
+#include "media/cast/cast_environment.h"
#include "media/cast/test/fake_task_runner.h"
#include "testing/gmock/include/gmock/gmock.h"
namespace media {
namespace cast {
-static const int64 kStartMillisecond = 123456789;
+namespace {
+class TestRtpPayloadFeedback : public RtpPayloadFeedback {
+ public:
+ TestRtpPayloadFeedback() {}
+ virtual ~TestRtpPayloadFeedback() {}
+
+ virtual void CastFeedback(const RtcpCastMessage& cast_feedback) OVERRIDE {
+ EXPECT_EQ(1u, cast_feedback.ack_frame_id_);
+ EXPECT_EQ(0u, cast_feedback.missing_frames_and_packets_.size());
+ }
+};
+} // namespace.
class AudioDecoderTest : public ::testing::Test {
protected:
- AudioDecoderTest() {}
-
- ~AudioDecoderTest() {}
-
- virtual void SetUp() {
+ AudioDecoderTest() {
+ testing_clock_.Advance(base::TimeDelta::FromMilliseconds(1234));
task_runner_ = new test::FakeTaskRunner(&testing_clock_);
- cast_thread_ = new CastThread(task_runner_, task_runner_, task_runner_,
- task_runner_, task_runner_);
+ cast_environment_ = new CastEnvironment(&testing_clock_, task_runner_,
+ task_runner_, task_runner_, task_runner_, task_runner_,
+ GetDefaultCastLoggingConfig());
}
+ virtual ~AudioDecoderTest() {}
+
void Configure(const AudioReceiverConfig& audio_config) {
- audio_decoder_ = new AudioDecoder(cast_thread_, audio_config);
+ audio_decoder_.reset(
+ new AudioDecoder(cast_environment_, audio_config, &cast_feedback_));
}
+ TestRtpPayloadFeedback cast_feedback_;
base::SimpleTestTickClock testing_clock_;
scoped_refptr<test::FakeTaskRunner> task_runner_;
- scoped_refptr<CastThread> cast_thread_;
- scoped_refptr<AudioDecoder> audio_decoder_;
+ scoped_refptr<CastEnvironment> cast_environment_;
+ scoped_ptr<AudioDecoder> audio_decoder_;
};
TEST_F(AudioDecoderTest, Pcm16MonoNoResampleOnePacket) {
@@ -56,23 +67,26 @@ TEST_F(AudioDecoderTest, Pcm16MonoNoResampleOnePacket) {
rtp_header.webrtc.type.Audio.isCNG = false;
std::vector<int16> payload(640, 0x1234);
-
- uint8* payload_data = reinterpret_cast<uint8*>(&payload[0]);
- int payload_size = payload.size() * sizeof(int16);
-
- audio_decoder_->IncomingParsedRtpPacket(payload_data, payload_size,
- rtp_header);
-
int number_of_10ms_blocks = 4;
int desired_frequency = 16000;
PcmAudioFrame audio_frame;
uint32 rtp_timestamp;
+ EXPECT_FALSE(audio_decoder_->GetRawAudioFrame(number_of_10ms_blocks,
+ desired_frequency,
+ &audio_frame,
+ &rtp_timestamp));
+
+ uint8* payload_data = reinterpret_cast<uint8*>(&payload[0]);
+ size_t payload_size = payload.size() * sizeof(int16);
+
+ audio_decoder_->IncomingParsedRtpPacket(payload_data,
+ payload_size, rtp_header);
+
EXPECT_TRUE(audio_decoder_->GetRawAudioFrame(number_of_10ms_blocks,
desired_frequency,
&audio_frame,
&rtp_timestamp));
-
EXPECT_EQ(1, audio_frame.channels);
EXPECT_EQ(16000, audio_frame.frequency);
EXPECT_EQ(640ul, audio_frame.samples.size());
@@ -80,7 +94,6 @@ TEST_F(AudioDecoderTest, Pcm16MonoNoResampleOnePacket) {
for (size_t i = 10; i < audio_frame.samples.size(); ++i) {
EXPECT_EQ(0x3412, audio_frame.samples[i]);
}
- task_runner_->RunTasks();
}
TEST_F(AudioDecoderTest, Pcm16StereoNoResampleTwoPackets) {
@@ -93,6 +106,7 @@ TEST_F(AudioDecoderTest, Pcm16StereoNoResampleTwoPackets) {
Configure(audio_config);
RtpCastHeader rtp_header;
+ rtp_header.frame_id = 0;
rtp_header.webrtc.header.payloadType = 127;
rtp_header.webrtc.header.sequenceNumber = 1234;
rtp_header.webrtc.header.timestamp = 0x87654321;
@@ -106,11 +120,10 @@ TEST_F(AudioDecoderTest, Pcm16StereoNoResampleTwoPackets) {
std::vector<int16> payload(640, 0x1234);
uint8* payload_data = reinterpret_cast<uint8*>(&payload[0]);
- int payload_size = payload.size() * sizeof(int16);
-
- audio_decoder_->IncomingParsedRtpPacket(payload_data, payload_size,
- rtp_header);
+ size_t payload_size = payload.size() * sizeof(int16);
+ audio_decoder_->IncomingParsedRtpPacket(payload_data,
+ payload_size, rtp_header);
int number_of_10ms_blocks = 2;
int desired_frequency = 16000;
@@ -121,29 +134,36 @@ TEST_F(AudioDecoderTest, Pcm16StereoNoResampleTwoPackets) {
desired_frequency,
&audio_frame,
&rtp_timestamp));
-
EXPECT_EQ(2, audio_frame.channels);
EXPECT_EQ(16000, audio_frame.frequency);
EXPECT_EQ(640ul, audio_frame.samples.size());
+ // First 10 samples per channel are 0 from NetEq.
for (size_t i = 10 * audio_config.channels; i < audio_frame.samples.size();
++i) {
EXPECT_EQ(0x3412, audio_frame.samples[i]);
}
+ rtp_header.frame_id++;
rtp_header.webrtc.header.sequenceNumber++;
rtp_header.webrtc.header.timestamp += (audio_config.frequency / 100) * 2 * 2;
- audio_decoder_->IncomingParsedRtpPacket(payload_data, payload_size,
- rtp_header);
+ audio_decoder_->IncomingParsedRtpPacket(payload_data,
+ payload_size, rtp_header);
+
+ EXPECT_TRUE(audio_decoder_->GetRawAudioFrame(number_of_10ms_blocks,
+ desired_frequency,
+ &audio_frame,
+ &rtp_timestamp));
EXPECT_EQ(2, audio_frame.channels);
EXPECT_EQ(16000, audio_frame.frequency);
EXPECT_EQ(640ul, audio_frame.samples.size());
- // First 10 samples per channel are 0 from NetEq.
- for (size_t i = 10 * audio_config.channels; i < audio_frame.samples.size();
- ++i) {
- EXPECT_EQ(0x3412, audio_frame.samples[i]);
+ for (size_t i = 0; i < audio_frame.samples.size(); ++i) {
+ EXPECT_NEAR(0x3412, audio_frame.samples[i], 1000);
}
- task_runner_->RunTasks();
+ // Test cast callback.
+ audio_decoder_->SendCastMessage();
+ testing_clock_.Advance(base::TimeDelta::FromMilliseconds(33));
+ audio_decoder_->SendCastMessage();
}
TEST_F(AudioDecoderTest, Pcm16Resample) {
@@ -169,10 +189,10 @@ TEST_F(AudioDecoderTest, Pcm16Resample) {
std::vector<int16> payload(640, 0x1234);
uint8* payload_data = reinterpret_cast<uint8*>(&payload[0]);
- int payload_size = payload.size() * sizeof(int16);
+ size_t payload_size = payload.size() * sizeof(int16);
- audio_decoder_->IncomingParsedRtpPacket(payload_data, payload_size,
- rtp_header);
+ audio_decoder_->IncomingParsedRtpPacket(payload_data,
+ payload_size, rtp_header);
int number_of_10ms_blocks = 2;
int desired_frequency = 48000;
@@ -194,7 +214,6 @@ TEST_F(AudioDecoderTest, Pcm16Resample) {
EXPECT_NEAR(0x3412, audio_frame.samples[i], 400);
if (0x3412 == audio_frame.samples[i]) count++;
}
- task_runner_->RunTasks();
}
} // namespace cast
diff --git a/chromium/media/cast/audio_receiver/audio_receiver.cc b/chromium/media/cast/audio_receiver/audio_receiver.cc
index cf8a8b8b1da..5aad22f628c 100644
--- a/chromium/media/cast/audio_receiver/audio_receiver.cc
+++ b/chromium/media/cast/audio_receiver/audio_receiver.cc
@@ -7,56 +7,44 @@
#include "base/bind.h"
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
+#include "crypto/encryptor.h"
+#include "crypto/symmetric_key.h"
#include "media/cast/audio_receiver/audio_decoder.h"
#include "media/cast/framer/framer.h"
#include "media/cast/rtcp/rtcp.h"
#include "media/cast/rtp_receiver/rtp_receiver.h"
-#include "third_party/webrtc/modules/interface/module_common_types.h"
-#include "third_party/webrtc/system_wrappers/interface/sleep.h"
-#include "third_party/webrtc/system_wrappers/interface/tick_util.h"
-static const int64 kMaxFrameWaitMs = 20;
-const int64 kMinSchedulingDelayMs = 1;
+// Max time we wait until an audio frame that is due to be played out is released.
+static const int64 kMaxAudioFrameWaitMs = 20;
+static const int64 kMinSchedulingDelayMs = 1;
namespace media {
namespace cast {
+DecodedAudioCallbackData::DecodedAudioCallbackData()
+ : number_of_10ms_blocks(0),
+ desired_frequency(0),
+ callback() {}
+
+DecodedAudioCallbackData::~DecodedAudioCallbackData() {}
// Local implementation of RtpData (defined in rtp_rtcp_defines.h).
// Used to pass payload data into the audio receiver.
class LocalRtpAudioData : public RtpData {
public:
explicit LocalRtpAudioData(AudioReceiver* audio_receiver)
- : audio_receiver_(audio_receiver),
- time_first_incoming_packet_(),
- first_incoming_rtp_timestamp_(0),
- default_tick_clock_(new base::DefaultTickClock()),
- clock_(default_tick_clock_.get()) {}
+ : audio_receiver_(audio_receiver) {}
virtual void OnReceivedPayloadData(
const uint8* payload_data,
- int payload_size,
+ size_t payload_size,
const RtpCastHeader* rtp_header) OVERRIDE {
- if (time_first_incoming_packet_.is_null()) {
- first_incoming_rtp_timestamp_ = rtp_header->webrtc.header.timestamp;
- time_first_incoming_packet_ = clock_->NowTicks();
- }
audio_receiver_->IncomingParsedRtpPacket(payload_data, payload_size,
*rtp_header);
}
- void GetFirstPacketInformation(base::TimeTicks* time_incoming_packet,
- uint32* incoming_rtp_timestamp) {
- *time_incoming_packet = time_first_incoming_packet_;
- *incoming_rtp_timestamp = first_incoming_rtp_timestamp_;
- }
-
private:
AudioReceiver* audio_receiver_;
- base::TimeTicks time_first_incoming_packet_;
- uint32 first_incoming_rtp_timestamp_;
- scoped_ptr<base::TickClock> default_tick_clock_;
- base::TickClock* clock_;
};
// Local implementation of RtpPayloadFeedback (defined in rtp_defines.h)
@@ -71,10 +59,6 @@ class LocalRtpAudioFeedback : public RtpPayloadFeedback {
audio_receiver_->CastFeedback(cast_message);
}
- virtual void RequestKeyFrame() OVERRIDE {
- DCHECK(false) << "Invalid callback";
- }
-
private:
AudioReceiver* audio_receiver_;
};
@@ -99,83 +83,151 @@ class LocalRtpReceiverStatistics : public RtpReceiverStatistics {
RtpReceiver* rtp_receiver_;
};
-
-AudioReceiver::AudioReceiver(scoped_refptr<CastThread> cast_thread,
+AudioReceiver::AudioReceiver(scoped_refptr<CastEnvironment> cast_environment,
const AudioReceiverConfig& audio_config,
PacedPacketSender* const packet_sender)
- : cast_thread_(cast_thread),
+ : cast_environment_(cast_environment),
codec_(audio_config.codec),
- incoming_ssrc_(audio_config.incoming_ssrc),
frequency_(audio_config.frequency),
audio_buffer_(),
audio_decoder_(),
time_offset_(),
- default_tick_clock_(new base::DefaultTickClock()),
- clock_(default_tick_clock_.get()),
weak_factory_(this) {
target_delay_delta_ =
base::TimeDelta::FromMilliseconds(audio_config.rtp_max_delay_ms);
incoming_payload_callback_.reset(new LocalRtpAudioData(this));
incoming_payload_feedback_.reset(new LocalRtpAudioFeedback(this));
if (audio_config.use_external_decoder) {
- audio_buffer_.reset(new Framer(incoming_payload_feedback_.get(),
- audio_config.incoming_ssrc,
- true,
- 0));
+ audio_buffer_.reset(new Framer(cast_environment->Clock(),
+ incoming_payload_feedback_.get(),
+ audio_config.incoming_ssrc,
+ true,
+ 0));
} else {
- audio_decoder_ = new AudioDecoder(cast_thread_, audio_config);
+ audio_decoder_.reset(new AudioDecoder(cast_environment,
+ audio_config,
+ incoming_payload_feedback_.get()));
+ }
+ if (audio_config.aes_iv_mask.size() == kAesKeySize &&
+ audio_config.aes_key.size() == kAesKeySize) {
+ iv_mask_ = audio_config.aes_iv_mask;
+ crypto::SymmetricKey* key = crypto::SymmetricKey::Import(
+ crypto::SymmetricKey::AES, audio_config.aes_key);
+ decryptor_.reset(new crypto::Encryptor());
+ decryptor_->Init(key, crypto::Encryptor::CTR, std::string());
+ } else if (audio_config.aes_iv_mask.size() != 0 ||
+ audio_config.aes_key.size() != 0) {
+ DCHECK(false) << "Invalid crypto configuration";
}
- rtp_receiver_.reset(new RtpReceiver(&audio_config,
- NULL,
- incoming_payload_callback_.get()));
+
+ rtp_receiver_.reset(new RtpReceiver(cast_environment->Clock(),
+ &audio_config,
+ NULL,
+ incoming_payload_callback_.get()));
rtp_audio_receiver_statistics_.reset(
new LocalRtpReceiverStatistics(rtp_receiver_.get()));
base::TimeDelta rtcp_interval_delta =
base::TimeDelta::FromMilliseconds(audio_config.rtcp_interval);
- rtcp_.reset(new Rtcp(NULL,
- packet_sender,
- NULL,
- rtp_audio_receiver_statistics_.get(),
- audio_config.rtcp_mode,
- rtcp_interval_delta,
- false,
- audio_config.feedback_ssrc,
- audio_config.rtcp_c_name));
- rtcp_->SetRemoteSSRC(audio_config.incoming_ssrc);
- ScheduleNextRtcpReport();
+ rtcp_.reset(new Rtcp(cast_environment,
+ NULL,
+ packet_sender,
+ NULL,
+ rtp_audio_receiver_statistics_.get(),
+ audio_config.rtcp_mode,
+ rtcp_interval_delta,
+ audio_config.feedback_ssrc,
+ audio_config.incoming_ssrc,
+ audio_config.rtcp_c_name));
}
AudioReceiver::~AudioReceiver() {}
+void AudioReceiver::InitializeTimers() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ ScheduleNextRtcpReport();
+ ScheduleNextCastMessage();
+}
+
void AudioReceiver::IncomingParsedRtpPacket(const uint8* payload_data,
- int payload_size,
+ size_t payload_size,
const RtpCastHeader& rtp_header) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ cast_environment_->Logging()->InsertPacketEvent(kPacketReceived,
+ rtp_header.webrtc.header.timestamp, rtp_header.frame_id,
+ rtp_header.packet_id, rtp_header.max_packet_id, payload_size);
+
+ // TODO(pwestin): update this, as is done for video, to refresh over time.
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ if (time_first_incoming_packet_.is_null()) {
+ InitializeTimers();
+ first_incoming_rtp_timestamp_ = rtp_header.webrtc.header.timestamp;
+ time_first_incoming_packet_ = cast_environment_->Clock()->NowTicks();
+ }
+
if (audio_decoder_) {
DCHECK(!audio_buffer_) << "Invalid internal state";
- audio_decoder_->IncomingParsedRtpPacket(payload_data, payload_size,
- rtp_header);
+ std::string plaintext(reinterpret_cast<const char*>(payload_data),
+ payload_size);
+ if (decryptor_) {
+ plaintext.clear();
+ if (!decryptor_->SetCounter(GetAesNonce(rtp_header.frame_id, iv_mask_))) {
+ NOTREACHED() << "Failed to set counter";
+ return;
+ }
+ if (!decryptor_->Decrypt(base::StringPiece(reinterpret_cast<const char*>(
+ payload_data), payload_size), &plaintext)) {
+ VLOG(0) << "Decryption error";
+ return;
+ }
+ }
+ audio_decoder_->IncomingParsedRtpPacket(
+ reinterpret_cast<const uint8*>(plaintext.data()), plaintext.size(),
+ rtp_header);
+ if (!queued_decoded_callbacks_.empty()) {
+ DecodedAudioCallbackData decoded_data = queued_decoded_callbacks_.front();
+ queued_decoded_callbacks_.pop_front();
+ cast_environment_->PostTask(CastEnvironment::AUDIO_DECODER, FROM_HERE,
+ base::Bind(&AudioReceiver::DecodeAudioFrameThread,
+ base::Unretained(this),
+ decoded_data.number_of_10ms_blocks,
+ decoded_data.desired_frequency,
+ decoded_data.callback));
+ }
return;
}
- if (audio_buffer_) {
- DCHECK(!audio_decoder_) << "Invalid internal state";
- audio_buffer_->InsertPacket(payload_data, payload_size, rtp_header);
- }
+
+ DCHECK(audio_buffer_) << "Invalid internal state";
+ DCHECK(!audio_decoder_) << "Invalid internal state";
+
+ bool complete = audio_buffer_->InsertPacket(payload_data, payload_size,
+ rtp_header);
+ if (!complete) return; // Audio frame not complete; wait for more packets.
+ if (queued_encoded_callbacks_.empty()) return;
+ AudioFrameEncodedCallback callback = queued_encoded_callbacks_.front();
+ queued_encoded_callbacks_.pop_front();
+ cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
+ base::Bind(&AudioReceiver::GetEncodedAudioFrame,
+ weak_factory_.GetWeakPtr(), callback));
}
void AudioReceiver::GetRawAudioFrame(int number_of_10ms_blocks,
- int desired_frequency,
- const AudioFrameDecodedCallback callback) {
+ int desired_frequency, const AudioFrameDecodedCallback& callback) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
DCHECK(audio_decoder_) << "Invalid function call in this configuration";
-
- cast_thread_->PostTask(CastThread::AUDIO_DECODER, FROM_HERE, base::Bind(
- &AudioReceiver::DecodeAudioFrameThread, weak_factory_.GetWeakPtr(),
- number_of_10ms_blocks, desired_frequency, callback));
+ // TODO(pwestin): we can skip this function by posting direct to the decoder.
+ cast_environment_->PostTask(CastEnvironment::AUDIO_DECODER, FROM_HERE,
+ base::Bind(&AudioReceiver::DecodeAudioFrameThread,
+ base::Unretained(this),
+ number_of_10ms_blocks,
+ desired_frequency,
+ callback));
}
void AudioReceiver::DecodeAudioFrameThread(
int number_of_10ms_blocks,
int desired_frequency,
const AudioFrameDecodedCallback callback) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::AUDIO_DECODER));
// TODO(mikhal): Allow the application to allocate this memory.
scoped_ptr<PcmAudioFrame> audio_frame(new PcmAudioFrame());
@@ -184,121 +236,255 @@ void AudioReceiver::DecodeAudioFrameThread(
desired_frequency,
audio_frame.get(),
&rtp_timestamp)) {
+ DecodedAudioCallbackData callback_data;
+ callback_data.number_of_10ms_blocks = number_of_10ms_blocks;
+ callback_data.desired_frequency = desired_frequency;
+ callback_data.callback = callback;
+ queued_decoded_callbacks_.push_back(callback_data);
return;
}
- base::TimeTicks now = clock_->NowTicks();
- base::TimeTicks playout_time;
- playout_time = GetPlayoutTime(now, rtp_timestamp);
-
- // Frame is ready - Send back to the main thread.
- cast_thread_->PostTask(CastThread::MAIN, FROM_HERE,
- base::Bind(callback,
- base::Passed(&audio_frame), playout_time));
+ base::TimeTicks now = cast_environment_->Clock()->NowTicks();
+
+ cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
+ base::Bind(&AudioReceiver::ReturnDecodedFrameWithPlayoutDelay,
+ base::Unretained(this), base::Passed(&audio_frame), rtp_timestamp,
+ callback));
}
-bool AudioReceiver::GetEncodedAudioFrame(EncodedAudioFrame* encoded_frame,
- base::TimeTicks* playout_time) {
+void AudioReceiver::ReturnDecodedFrameWithPlayoutDelay(
+ scoped_ptr<PcmAudioFrame> audio_frame, uint32 rtp_timestamp,
+ const AudioFrameDecodedCallback callback) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ cast_environment_->Logging()->InsertFrameEvent(kAudioFrameDecoded,
+ rtp_timestamp, kFrameIdUnknown);
+
+ base::TimeTicks now = cast_environment_->Clock()->NowTicks();
+ base::TimeTicks playout_time = GetPlayoutTime(now, rtp_timestamp);
+
+ cast_environment_->Logging()->InsertFrameEventWithDelay(kAudioPlayoutDelay,
+ rtp_timestamp, kFrameIdUnknown, playout_time - now);
+
+ // Frame is ready - Send back to the caller.
+ cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
+ base::Bind(callback, base::Passed(&audio_frame), playout_time));
+}
+
+void AudioReceiver::PlayoutTimeout() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ DCHECK(audio_buffer_) << "Invalid function call in this configuration";
+ if (queued_encoded_callbacks_.empty()) {
+ // Already released by incoming packet.
+ return;
+ }
+ uint32 rtp_timestamp = 0;
+ bool next_frame = false;
+ scoped_ptr<EncodedAudioFrame> encoded_frame(new EncodedAudioFrame());
+
+ if (!audio_buffer_->GetEncodedAudioFrame(encoded_frame.get(),
+ &rtp_timestamp, &next_frame)) {
+ // We have no audio frames. Wait for new packet(s).
+ // Since the application can post multiple AudioFrameEncodedCallback and
+ // we only check the next frame to play out we might have multiple timeout
+ // events firing after each other; however this should be a rare event.
+ VLOG(1) << "Failed to retrieved a complete frame at this point in time";
+ return;
+ }
+
+ if (decryptor_ && !DecryptAudioFrame(&encoded_frame)) {
+ // Logging already done.
+ return;
+ }
+
+ if (PostEncodedAudioFrame(queued_encoded_callbacks_.front(), rtp_timestamp,
+ next_frame, &encoded_frame)) {
+ // Call succeeded; remove the callback from the list.
+ queued_encoded_callbacks_.pop_front();
+ }
+}
+
+void AudioReceiver::GetEncodedAudioFrame(
+ const AudioFrameEncodedCallback& callback) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
DCHECK(audio_buffer_) << "Invalid function call in this configuration";
uint32 rtp_timestamp = 0;
bool next_frame = false;
- base::TimeTicks timeout = clock_->NowTicks() +
- base::TimeDelta::FromMilliseconds(kMaxFrameWaitMs);
- if (!audio_buffer_->GetEncodedAudioFrame(timeout, encoded_frame,
+ scoped_ptr<EncodedAudioFrame> encoded_frame(new EncodedAudioFrame());
+
+ if (!audio_buffer_->GetEncodedAudioFrame(encoded_frame.get(),
&rtp_timestamp, &next_frame)) {
- return false;
+ // We have no audio frames. Wait for new packet(s).
+ VLOG(1) << "Wait for more audio packets in frame";
+ queued_encoded_callbacks_.push_back(callback);
+ return;
}
- base::TimeTicks now = clock_->NowTicks();
- *playout_time = GetPlayoutTime(now, rtp_timestamp);
-
- base::TimeDelta time_until_playout = now - *playout_time;
- base::TimeDelta time_until_release = time_until_playout -
- base::TimeDelta::FromMilliseconds(kMaxFrameWaitMs);
- base::TimeDelta zero_delta = base::TimeDelta::FromMilliseconds(0);
- if (!next_frame && (time_until_release > zero_delta)) {
- // Relying on the application to keep polling.
- return false;
+ if (decryptor_ && !DecryptAudioFrame(&encoded_frame)) {
+ // Logging already done.
+ queued_encoded_callbacks_.push_back(callback);
+ return;
+ }
+ if (!PostEncodedAudioFrame(callback, rtp_timestamp, next_frame,
+ &encoded_frame)) {
+ // We have an audio frame; however we are missing packets and we have time
+ // to wait for new packet(s).
+ queued_encoded_callbacks_.push_back(callback);
}
- encoded_frame->codec = codec_;
- return true;
}
-void AudioReceiver::ReleaseFrame(uint8 frame_id) {
- audio_buffer_->ReleaseFrame(frame_id);
+bool AudioReceiver::PostEncodedAudioFrame(
+ const AudioFrameEncodedCallback& callback,
+ uint32 rtp_timestamp,
+ bool next_frame,
+ scoped_ptr<EncodedAudioFrame>* encoded_frame) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ DCHECK(audio_buffer_) << "Invalid function call in this configuration";
+
+ base::TimeTicks now = cast_environment_->Clock()->NowTicks();
+ base::TimeTicks playout_time = GetPlayoutTime(now, rtp_timestamp);
+ base::TimeDelta time_until_playout = playout_time - now;
+ base::TimeDelta min_wait_delta =
+ base::TimeDelta::FromMilliseconds(kMaxAudioFrameWaitMs);
+
+ if (!next_frame && (time_until_playout > min_wait_delta)) {
+ base::TimeDelta time_until_release = time_until_playout - min_wait_delta;
+ cast_environment_->PostDelayedTask(CastEnvironment::MAIN, FROM_HERE,
+ base::Bind(&AudioReceiver::PlayoutTimeout, weak_factory_.GetWeakPtr()),
+ time_until_release);
+ VLOG(1) << "Wait until time to playout:"
+ << time_until_release.InMilliseconds();
+ return false;
+ }
+ (*encoded_frame)->codec = codec_;
+ audio_buffer_->ReleaseFrame((*encoded_frame)->frame_id);
+
+ cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
+ base::Bind(callback, base::Passed(encoded_frame), playout_time));
+ return true;
}
-void AudioReceiver::IncomingPacket(const uint8* packet, int length) {
+void AudioReceiver::IncomingPacket(const uint8* packet, size_t length,
+ const base::Closure callback) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
bool rtcp_packet = Rtcp::IsRtcpPacket(packet, length);
if (!rtcp_packet) {
rtp_receiver_->ReceivedPacket(packet, length);
} else {
rtcp_->IncomingRtcpPacket(packet, length);
}
+ cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE, callback);
}
void AudioReceiver::CastFeedback(const RtcpCastMessage& cast_message) {
- rtcp_->SendRtcpCast(cast_message);
+ // TODO(pwestin): add logging.
+ rtcp_->SendRtcpFromRtpReceiver(&cast_message, NULL);
}
base::TimeTicks AudioReceiver::GetPlayoutTime(base::TimeTicks now,
uint32 rtp_timestamp) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
// Senders time in ms when this frame was recorded.
// Note: the senders clock and our local clock might not be synced.
base::TimeTicks rtp_timestamp_in_ticks;
- base::TimeDelta zero_delta = base::TimeDelta::FromMilliseconds(0);
- if (time_offset_ == zero_delta) {
- base::TimeTicks time_first_incoming_packet;
- uint32 first_incoming_rtp_timestamp;
-
- incoming_payload_callback_->GetFirstPacketInformation(
- &time_first_incoming_packet, &first_incoming_rtp_timestamp);
-
+ if (time_offset_ == base::TimeDelta()) {
if (rtcp_->RtpTimestampInSenderTime(frequency_,
- first_incoming_rtp_timestamp,
+ first_incoming_rtp_timestamp_,
&rtp_timestamp_in_ticks)) {
- time_offset_ = time_first_incoming_packet - rtp_timestamp_in_ticks;
+ time_offset_ = time_first_incoming_packet_ - rtp_timestamp_in_ticks;
} else {
// We have not received any RTCP to sync the stream play it out as soon as
// possible.
- uint32 rtp_timestamp_diff =
- rtp_timestamp - first_incoming_rtp_timestamp;
+ uint32 rtp_timestamp_diff = rtp_timestamp - first_incoming_rtp_timestamp_;
int frequency_khz = frequency_ / 1000;
base::TimeDelta rtp_time_diff_delta =
base::TimeDelta::FromMilliseconds(rtp_timestamp_diff / frequency_khz);
- base::TimeDelta time_diff_delta = now - time_first_incoming_packet;
- if (rtp_time_diff_delta > time_diff_delta) {
- return (now + (rtp_time_diff_delta - time_diff_delta));
- } else {
- return now;
- }
+ base::TimeDelta time_diff_delta = now - time_first_incoming_packet_;
+
+ return now + std::max(rtp_time_diff_delta - time_diff_delta,
+ base::TimeDelta());
}
}
// This can fail if we have not received any RTCP packets in a long time.
- if (rtcp_->RtpTimestampInSenderTime(frequency_, rtp_timestamp,
- &rtp_timestamp_in_ticks)) {
- return (rtp_timestamp_in_ticks + time_offset_ + target_delay_delta_);
- } else {
- return now;
+ return rtcp_->RtpTimestampInSenderTime(frequency_, rtp_timestamp,
+ &rtp_timestamp_in_ticks) ?
+ rtp_timestamp_in_ticks + time_offset_ + target_delay_delta_ :
+ now;
+}
+
+bool AudioReceiver::DecryptAudioFrame(
+ scoped_ptr<EncodedAudioFrame>* audio_frame) {
+ DCHECK(decryptor_) << "Invalid state";
+
+ if (!decryptor_->SetCounter(GetAesNonce((*audio_frame)->frame_id,
+ iv_mask_))) {
+ NOTREACHED() << "Failed to set counter";
+ return false;
+ }
+ std::string decrypted_audio_data;
+ if (!decryptor_->Decrypt((*audio_frame)->data, &decrypted_audio_data)) {
+ VLOG(0) << "Decryption error";
+ // Give up on this frame, release it from jitter buffer.
+ audio_buffer_->ReleaseFrame((*audio_frame)->frame_id);
+ return false;
}
+ (*audio_frame)->data.swap(decrypted_audio_data);
+ return true;
}
void AudioReceiver::ScheduleNextRtcpReport() {
- base::TimeDelta time_to_send =
- rtcp_->TimeToSendNextRtcpReport() - clock_->NowTicks();
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ base::TimeDelta time_to_send = rtcp_->TimeToSendNextRtcpReport() -
+ cast_environment_->Clock()->NowTicks();
time_to_send = std::max(time_to_send,
base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs));
- cast_thread_->PostDelayedTask(CastThread::MAIN, FROM_HERE,
+ cast_environment_->PostDelayedTask(CastEnvironment::MAIN, FROM_HERE,
base::Bind(&AudioReceiver::SendNextRtcpReport,
weak_factory_.GetWeakPtr()), time_to_send);
}
void AudioReceiver::SendNextRtcpReport() {
- rtcp_->SendRtcpReport(incoming_ssrc_);
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ // TODO(pwestin): add logging.
+ rtcp_->SendRtcpFromRtpReceiver(NULL, NULL);
ScheduleNextRtcpReport();
}
+// Cast messages should be sent within a maximum interval. Schedule a call
+// if not triggered elsewhere, e.g. by the cast message_builder.
+void AudioReceiver::ScheduleNextCastMessage() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ base::TimeTicks send_time;
+ if (audio_buffer_) {
+ audio_buffer_->TimeToSendNextCastMessage(&send_time);
+ } else if (audio_decoder_) {
+ audio_decoder_->TimeToSendNextCastMessage(&send_time);
+ } else {
+ NOTREACHED();
+ }
+ base::TimeDelta time_to_send = send_time -
+ cast_environment_->Clock()->NowTicks();
+ time_to_send = std::max(time_to_send,
+ base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs));
+ cast_environment_->PostDelayedTask(CastEnvironment::MAIN, FROM_HERE,
+ base::Bind(&AudioReceiver::SendNextCastMessage,
+ weak_factory_.GetWeakPtr()), time_to_send);
+}
+
+void AudioReceiver::SendNextCastMessage() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+
+ if (audio_buffer_) {
+ // Will only send a message if it is time.
+ audio_buffer_->SendCastMessage();
+ }
+ if (audio_decoder_) {
+ // Will only send a message if it is time.
+ audio_decoder_->SendCastMessage();
+ }
+ ScheduleNextCastMessage();
+}
+
} // namespace cast
} // namespace media
diff --git a/chromium/media/cast/audio_receiver/audio_receiver.gypi b/chromium/media/cast/audio_receiver/audio_receiver.gypi
index 240f742b899..a851612f721 100644
--- a/chromium/media/cast/audio_receiver/audio_receiver.gypi
+++ b/chromium/media/cast/audio_receiver/audio_receiver.gypi
@@ -8,8 +8,8 @@
'target_name': 'cast_audio_receiver',
'type': 'static_library',
'include_dirs': [
- '<(DEPTH)/',
- '<(DEPTH)/third_party/',
+ '<(DEPTH)/',
+ '<(DEPTH)/third_party/',
'<(DEPTH)/third_party/webrtc/',
],
'sources': [
@@ -19,10 +19,11 @@
'audio_receiver.cc',
], # source
'dependencies': [
+ '<(DEPTH)/crypto/crypto.gyp:crypto',
'<(DEPTH)/media/cast/rtcp/rtcp.gyp:cast_rtcp',
'<(DEPTH)/media/cast/rtp_receiver/rtp_receiver.gyp:*',
'<(DEPTH)/third_party/webrtc/webrtc.gyp:webrtc',
],
},
],
-} \ No newline at end of file
+}
diff --git a/chromium/media/cast/audio_receiver/audio_receiver.h b/chromium/media/cast/audio_receiver/audio_receiver.h
index 9a1f138efc6..c49e1c15c25 100644
--- a/chromium/media/cast/audio_receiver/audio_receiver.h
+++ b/chromium/media/cast/audio_receiver/audio_receiver.h
@@ -11,14 +11,17 @@
#include "base/memory/scoped_ptr.h"
#include "base/memory/weak_ptr.h"
#include "base/threading/non_thread_safe.h"
-#include "base/time/default_tick_clock.h"
#include "base/time/tick_clock.h"
#include "base/time/time.h"
#include "media/cast/cast_config.h"
+#include "media/cast/cast_environment.h"
#include "media/cast/cast_receiver.h"
-#include "media/cast/cast_thread.h"
#include "media/cast/rtcp/rtcp.h" // RtcpCastMessage
-#include "media/cast/rtp_common/rtp_defines.h" // RtpCastHeader
+#include "media/cast/rtp_receiver/rtp_receiver_defines.h" // RtpCastHeader
+
+namespace crypto {
+ class Encryptor;
+}
namespace media {
namespace cast {
@@ -31,12 +34,20 @@ class PacedPacketSender;
class RtpReceiver;
class RtpReceiverStatistics;
+struct DecodedAudioCallbackData {
+ DecodedAudioCallbackData();
+ ~DecodedAudioCallbackData();
+ int number_of_10ms_blocks;
+ int desired_frequency;
+ AudioFrameDecodedCallback callback;
+};
+
// This class is not thread safe. Should only be called from the Main cast
// thread.
class AudioReceiver : public base::NonThreadSafe,
public base::SupportsWeakPtr<AudioReceiver> {
public:
- AudioReceiver(scoped_refptr<CastThread> cast_thread,
+ AudioReceiver(scoped_refptr<CastEnvironment> cast_environment,
const AudioReceiverConfig& audio_config,
PacedPacketSender* const packet_sender);
@@ -46,28 +57,18 @@ class AudioReceiver : public base::NonThreadSafe,
// Actual decoding will be preformed on a designated audio_decoder thread.
void GetRawAudioFrame(int number_of_10ms_blocks,
int desired_frequency,
- const AudioFrameDecodedCallback callback);
+ const AudioFrameDecodedCallback& callback);
// Extract an encoded audio frame from the cast receiver.
- bool GetEncodedAudioFrame(EncodedAudioFrame* audio_frame,
- base::TimeTicks* playout_time);
+ void GetEncodedAudioFrame(const AudioFrameEncodedCallback& callback);
- // Release frame - should be called following a GetCodedAudioFrame call.
// Should only be called from the main cast thread.
- void ReleaseFrame(uint8 frame_id);
-
- // Should only be called from the main cast thread.
- void IncomingPacket(const uint8* packet, int length);
-
- // Only used for testing.
- void set_clock(base::TickClock* clock) {
- clock_ = clock;
- rtcp_->set_clock(clock);
- }
+ void IncomingPacket(const uint8* packet, size_t length,
+ const base::Closure callback);
protected:
void IncomingParsedRtpPacket(const uint8* payload_data,
- int payload_size,
+ size_t payload_size,
const RtpCastHeader& rtp_header);
private:
friend class LocalRtpAudioData;
@@ -75,15 +76,31 @@ class AudioReceiver : public base::NonThreadSafe,
void CastFeedback(const RtcpCastMessage& cast_message);
+ // Time to pull out the audio even though we are missing data.
+ void PlayoutTimeout();
+
+ bool PostEncodedAudioFrame(const AudioFrameEncodedCallback& callback,
+ uint32 rtp_timestamp,
+ bool next_frame,
+ scoped_ptr<EncodedAudioFrame>* encoded_frame);
+
// Actual decoding implementation - should be called under the audio decoder
// thread.
void DecodeAudioFrameThread(int number_of_10ms_blocks,
int desired_frequency,
const AudioFrameDecodedCallback callback);
+ void ReturnDecodedFrameWithPlayoutDelay(
+ scoped_ptr<PcmAudioFrame> audio_frame, uint32 rtp_timestamp,
+ const AudioFrameDecodedCallback callback);
// Return the playout time based on the current time and rtp timestamp.
- base::TimeTicks GetPlayoutTime(base::TimeTicks now,
- uint32 rtp_timestamp);
+ base::TimeTicks GetPlayoutTime(base::TimeTicks now, uint32 rtp_timestamp);
+
+ void InitializeTimers();
+
+ // Decrypts the data within the |audio_frame| and replaces the data with the
+ // decrypted string.
+ bool DecryptAudioFrame(scoped_ptr<EncodedAudioFrame>* audio_frame);
// Schedule the next RTCP report.
void ScheduleNextRtcpReport();
@@ -91,27 +108,36 @@ class AudioReceiver : public base::NonThreadSafe,
// Actually send the next RTCP report.
void SendNextRtcpReport();
- scoped_refptr<CastThread> cast_thread_;
+ // Schedule timing for the next cast message.
+ void ScheduleNextCastMessage();
+
+ // Actually send the next cast message.
+ void SendNextCastMessage();
+
+ scoped_refptr<CastEnvironment> cast_environment_;
base::WeakPtrFactory<AudioReceiver> weak_factory_;
const AudioCodec codec_;
- const uint32 incoming_ssrc_;
const int frequency_;
base::TimeDelta target_delay_delta_;
scoped_ptr<Framer> audio_buffer_;
- scoped_refptr<AudioDecoder> audio_decoder_;
+ scoped_ptr<AudioDecoder> audio_decoder_;
scoped_ptr<LocalRtpAudioData> incoming_payload_callback_;
scoped_ptr<LocalRtpAudioFeedback> incoming_payload_feedback_;
scoped_ptr<RtpReceiver> rtp_receiver_;
scoped_ptr<Rtcp> rtcp_;
scoped_ptr<RtpReceiverStatistics> rtp_audio_receiver_statistics_;
base::TimeDelta time_offset_;
+ base::TimeTicks time_first_incoming_packet_;
+ uint32 first_incoming_rtp_timestamp_;
+ scoped_ptr<crypto::Encryptor> decryptor_;
+ std::string iv_mask_;
- scoped_ptr<base::TickClock> default_tick_clock_;
- base::TickClock* clock_;
+ std::list<AudioFrameEncodedCallback> queued_encoded_callbacks_;
+ std::list<DecodedAudioCallbackData> queued_decoded_callbacks_;
};
} // namespace cast
} // namespace media
-#endif // MEDIA_CAST_AUDIO_RECEIVER_AUDIO_RECEIVER_H_ \ No newline at end of file
+#endif // MEDIA_CAST_AUDIO_RECEIVER_AUDIO_RECEIVER_H_
diff --git a/chromium/media/cast/audio_receiver/audio_receiver_unittest.cc b/chromium/media/cast/audio_receiver/audio_receiver_unittest.cc
index 0cb564b1a40..a10af679925 100644
--- a/chromium/media/cast/audio_receiver/audio_receiver_unittest.cc
+++ b/chromium/media/cast/audio_receiver/audio_receiver_unittest.cc
@@ -8,24 +8,59 @@
#include "base/test/simple_test_tick_clock.h"
#include "media/cast/audio_receiver/audio_receiver.h"
#include "media/cast/cast_defines.h"
-#include "media/cast/cast_thread.h"
-#include "media/cast/pacing/mock_paced_packet_sender.h"
+#include "media/cast/cast_environment.h"
+#include "media/cast/net/pacing/mock_paced_packet_sender.h"
+#include "media/cast/rtcp/test_rtcp_packet_builder.h"
#include "media/cast/test/fake_task_runner.h"
#include "testing/gmock/include/gmock/gmock.h"
namespace media {
namespace cast {
-static const int kPacketSize = 1500;
-static const int64 kStartMillisecond = 123456789;
+static const int64 kStartMillisecond = GG_INT64_C(12345678900000);
+
+namespace {
+class TestAudioEncoderCallback :
+ public base::RefCountedThreadSafe<TestAudioEncoderCallback> {
+ public:
+ TestAudioEncoderCallback()
+ : num_called_(0) {}
+
+ void SetExpectedResult(uint8 expected_frame_id,
+ const base::TimeTicks& expected_playout_time) {
+ expected_frame_id_ = expected_frame_id;
+ expected_playout_time_ = expected_playout_time;
+ }
+
+ void DeliverEncodedAudioFrame(scoped_ptr<EncodedAudioFrame> audio_frame,
+ const base::TimeTicks& playout_time) {
+ EXPECT_EQ(expected_frame_id_, audio_frame->frame_id);
+ EXPECT_EQ(kPcm16, audio_frame->codec);
+ EXPECT_EQ(expected_playout_time_, playout_time);
+ num_called_++;
+ }
+
+ int number_times_called() const { return num_called_;}
+
+ protected:
+ virtual ~TestAudioEncoderCallback() {}
+
+ private:
+ friend class base::RefCountedThreadSafe<TestAudioEncoderCallback>;
+
+ int num_called_;
+ uint8 expected_frame_id_;
+ base::TimeTicks expected_playout_time_;
+};
+} // namespace
class PeerAudioReceiver : public AudioReceiver {
public:
- PeerAudioReceiver(scoped_refptr<CastThread> cast_thread,
+ PeerAudioReceiver(scoped_refptr<CastEnvironment> cast_environment,
const AudioReceiverConfig& audio_config,
PacedPacketSender* const packet_sender)
- : AudioReceiver(cast_thread, audio_config, packet_sender) {
- }
+ : AudioReceiver(cast_environment, audio_config, packet_sender) {}
+
using AudioReceiver::IncomingParsedRtpPacket;
};
@@ -38,53 +73,140 @@ class AudioReceiverTest : public ::testing::Test {
audio_config_.channels = 1;
audio_config_.codec = kPcm16;
audio_config_.use_external_decoder = false;
+ audio_config_.feedback_ssrc = 1234;
testing_clock_.Advance(
base::TimeDelta::FromMilliseconds(kStartMillisecond));
task_runner_ = new test::FakeTaskRunner(&testing_clock_);
- cast_thread_ = new CastThread(task_runner_, task_runner_, task_runner_,
- task_runner_, task_runner_);
+ cast_environment_ = new CastEnvironment(&testing_clock_, task_runner_,
+ task_runner_, task_runner_, task_runner_, task_runner_,
+ GetDefaultCastLoggingConfig());
+ test_audio_encoder_callback_ = new TestAudioEncoderCallback();
}
void Configure(bool use_external_decoder) {
audio_config_.use_external_decoder = use_external_decoder;
- receiver_.reset(new
- PeerAudioReceiver(cast_thread_, audio_config_, &mock_transport_));
- receiver_->set_clock(&testing_clock_);
+ receiver_.reset(new PeerAudioReceiver(cast_environment_, audio_config_,
+ &mock_transport_));
}
- ~AudioReceiverTest() {}
+ virtual ~AudioReceiverTest() {}
+
+ static void DummyDeletePacket(const uint8* packet) {};
virtual void SetUp() {
- payload_.assign(kPacketSize, 0);
- // Always start with a key frame.
+ payload_.assign(kIpPacketSize, 0);
rtp_header_.is_key_frame = true;
rtp_header_.frame_id = 0;
rtp_header_.packet_id = 0;
rtp_header_.max_packet_id = 0;
rtp_header_.is_reference = false;
rtp_header_.reference_frame_id = 0;
+ rtp_header_.webrtc.header.timestamp = 0;
}
AudioReceiverConfig audio_config_;
std::vector<uint8> payload_;
RtpCastHeader rtp_header_;
+ base::SimpleTestTickClock testing_clock_;
MockPacedPacketSender mock_transport_;
scoped_refptr<test::FakeTaskRunner> task_runner_;
scoped_ptr<PeerAudioReceiver> receiver_;
- scoped_refptr<CastThread> cast_thread_;
- base::SimpleTestTickClock testing_clock_;
+ scoped_refptr<CastEnvironment> cast_environment_;
+ scoped_refptr<TestAudioEncoderCallback> test_audio_encoder_callback_;
};
TEST_F(AudioReceiverTest, GetOnePacketEncodedframe) {
Configure(true);
- receiver_->IncomingParsedRtpPacket(
- payload_.data(), payload_.size(), rtp_header_);
+ EXPECT_CALL(mock_transport_, SendRtcpPacket(testing::_)).Times(1);
+
+ receiver_->IncomingParsedRtpPacket(payload_.data(),
+ payload_.size(), rtp_header_);
EncodedAudioFrame audio_frame;
base::TimeTicks playout_time;
- EXPECT_TRUE(receiver_->GetEncodedAudioFrame(&audio_frame, &playout_time));
- EXPECT_EQ(0, audio_frame.frame_id);
- EXPECT_EQ(kPcm16, audio_frame.codec);
+ test_audio_encoder_callback_->SetExpectedResult(0, testing_clock_.NowTicks());
+
+ AudioFrameEncodedCallback frame_encoded_callback =
+ base::Bind(&TestAudioEncoderCallback::DeliverEncodedAudioFrame,
+ test_audio_encoder_callback_.get());
+
+ receiver_->GetEncodedAudioFrame(frame_encoded_callback);
+ task_runner_->RunTasks();
+ EXPECT_EQ(1, test_audio_encoder_callback_->number_times_called());
+}
+
+TEST_F(AudioReceiverTest, MultiplePendingGetCalls) {
+ Configure(true);
+ EXPECT_CALL(mock_transport_, SendRtcpPacket(testing::_)).WillRepeatedly(
+ testing::Return(true));
+
+ AudioFrameEncodedCallback frame_encoded_callback =
+ base::Bind(&TestAudioEncoderCallback::DeliverEncodedAudioFrame,
+ test_audio_encoder_callback_.get());
+
+ receiver_->GetEncodedAudioFrame(frame_encoded_callback);
+
+ receiver_->IncomingParsedRtpPacket(payload_.data(), payload_.size(),
+ rtp_header_);
+
+ EncodedAudioFrame audio_frame;
+ base::TimeTicks playout_time;
+ test_audio_encoder_callback_->SetExpectedResult(0, testing_clock_.NowTicks());
+
+ task_runner_->RunTasks();
+ EXPECT_EQ(1, test_audio_encoder_callback_->number_times_called());
+
+ TestRtcpPacketBuilder rtcp_packet;
+
+ uint32 ntp_high;
+ uint32 ntp_low;
+ ConvertTimeTicksToNtp(testing_clock_.NowTicks(), &ntp_high, &ntp_low);
+ rtcp_packet.AddSrWithNtp(audio_config_.feedback_ssrc, ntp_high, ntp_low,
+ rtp_header_.webrtc.header.timestamp);
+
+ testing_clock_.Advance(base::TimeDelta::FromMilliseconds(20));
+
+ receiver_->IncomingPacket(rtcp_packet.Packet(), rtcp_packet.Length(),
+ base::Bind(AudioReceiverTest::DummyDeletePacket, rtcp_packet.Packet()));
+
+ // Make sure that we are not continuous and that the RTP timestamp represent a
+ // time in the future.
+ rtp_header_.is_key_frame = false;
+ rtp_header_.frame_id = 2;
+ rtp_header_.is_reference = true;
+ rtp_header_.reference_frame_id = 0;
+ rtp_header_.webrtc.header.timestamp = 960;
+ test_audio_encoder_callback_->SetExpectedResult(2,
+ testing_clock_.NowTicks() + base::TimeDelta::FromMilliseconds(100));
+
+ receiver_->IncomingParsedRtpPacket(payload_.data(), payload_.size(),
+ rtp_header_);
+ receiver_->GetEncodedAudioFrame(frame_encoded_callback);
+ task_runner_->RunTasks();
+
+ // Frame 2 should not come out at this point in time.
+ EXPECT_EQ(1, test_audio_encoder_callback_->number_times_called());
+
+ // Through on one more pending callback.
+ receiver_->GetEncodedAudioFrame(frame_encoded_callback);
+
+ testing_clock_.Advance(base::TimeDelta::FromMilliseconds(100));
+
+ task_runner_->RunTasks();
+ EXPECT_EQ(2, test_audio_encoder_callback_->number_times_called());
+
+ test_audio_encoder_callback_->SetExpectedResult(3, testing_clock_.NowTicks());
+
+ // Through on one more pending audio frame.
+ rtp_header_.frame_id = 3;
+ rtp_header_.is_reference = false;
+ rtp_header_.reference_frame_id = 0;
+ rtp_header_.webrtc.header.timestamp = 1280;
+ receiver_->IncomingParsedRtpPacket(payload_.data(), payload_.size(),
+ rtp_header_);
+
+ receiver_->GetEncodedAudioFrame(frame_encoded_callback);
task_runner_->RunTasks();
+ EXPECT_EQ(3, test_audio_encoder_callback_->number_times_called());
}
// TODO(mikhal): Add encoded frames.
diff --git a/chromium/media/cast/audio_sender/audio_encoder.cc b/chromium/media/cast/audio_sender/audio_encoder.cc
index 175f82b3124..a82d1de39a5 100644
--- a/chromium/media/cast/audio_sender/audio_encoder.cc
+++ b/chromium/media/cast/audio_sender/audio_encoder.cc
@@ -4,169 +4,289 @@
#include "media/cast/audio_sender/audio_encoder.h"
+#include <algorithm>
+
#include "base/bind.h"
+#include "base/bind_helpers.h"
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
+#include "base/sys_byteorder.h"
+#include "base/time/time.h"
+#include "media/base/audio_bus.h"
#include "media/cast/cast_defines.h"
-#include "media/cast/cast_thread.h"
-#include "third_party/webrtc/modules/audio_coding/main/interface/audio_coding_module.h"
-#include "third_party/webrtc/modules/interface/module_common_types.h"
+#include "media/cast/cast_environment.h"
+#include "third_party/opus/src/include/opus.h"
namespace media {
namespace cast {
-// 48KHz, 2 channels and 100 ms.
-static const int kMaxNumberOfSamples = 48 * 2 * 100;
+void LogAudioEncodedEvent(CastEnvironment* const cast_environment,
+ const base::TimeTicks& recorded_time) {
+ // TODO(mikhal): Resolve timestamp calculation for audio.
+ cast_environment->Logging()->InsertFrameEvent(kAudioFrameEncoded,
+ GetVideoRtpTimestamp(recorded_time), kFrameIdUnknown);
+}
-// This class is only called from the cast audio encoder thread.
-class WebrtEncodedDataCallback : public webrtc::AudioPacketizationCallback {
+// Base class that handles the common problem of feeding one or more AudioBus'
+// data into a 10 ms buffer and then, once the buffer is full, encoding the
+// signal and emitting an EncodedAudioFrame via the FrameEncodedCallback.
+//
+// Subclasses complete the implementation by handling the actual encoding
+// details.
+class AudioEncoder::ImplBase {
public:
- WebrtEncodedDataCallback(scoped_refptr<CastThread> cast_thread,
- AudioCodec codec,
- int frequency)
- : codec_(codec),
- frequency_(frequency),
- cast_thread_(cast_thread),
- last_timestamp_(0) {}
-
- virtual int32 SendData(
- webrtc::FrameType /*frame_type*/,
- uint8 /*payload_type*/,
- uint32 timestamp,
- const uint8* payload_data,
- uint16 payload_size,
- const webrtc::RTPFragmentationHeader* /*fragmentation*/) {
- scoped_ptr<EncodedAudioFrame> audio_frame(new EncodedAudioFrame());
- audio_frame->codec = codec_;
- audio_frame->samples = timestamp - last_timestamp_;
- DCHECK(audio_frame->samples <= kMaxNumberOfSamples);
- last_timestamp_ = timestamp;
- audio_frame->data.insert(audio_frame->data.begin(),
- payload_data,
- payload_data + payload_size);
-
- cast_thread_->PostTask(CastThread::MAIN, FROM_HERE,
- base::Bind(*frame_encoded_callback_, base::Passed(&audio_frame),
- recorded_time_));
- return 0;
+ ImplBase(CastEnvironment* cast_environment,
+ AudioCodec codec, int num_channels, int sampling_rate,
+ const FrameEncodedCallback& callback)
+ : cast_environment_(cast_environment),
+ codec_(codec), num_channels_(num_channels),
+ samples_per_10ms_(sampling_rate / 100),
+ callback_(callback),
+ buffer_fill_end_(0),
+ frame_id_(0) {
+ CHECK_GT(num_channels_, 0);
+ CHECK_GT(samples_per_10ms_, 0);
+ CHECK_EQ(sampling_rate % 100, 0);
+ CHECK_LE(samples_per_10ms_ * num_channels_,
+ EncodedAudioFrame::kMaxNumberOfSamples);
}
- void SetEncodedCallbackInfo(
- const base::TimeTicks& recorded_time,
- const AudioEncoder::FrameEncodedCallback* frame_encoded_callback) {
- recorded_time_ = recorded_time;
- frame_encoded_callback_ = frame_encoded_callback;
+ virtual ~ImplBase() {}
+
+ void EncodeAudio(const AudioBus* audio_bus,
+ const base::TimeTicks& recorded_time,
+ const base::Closure& done_callback) {
+ int src_pos = 0;
+ while (src_pos < audio_bus->frames()) {
+ const int num_samples_to_xfer =
+ std::min(samples_per_10ms_ - buffer_fill_end_,
+ audio_bus->frames() - src_pos);
+ DCHECK_EQ(audio_bus->channels(), num_channels_);
+ TransferSamplesIntoBuffer(
+ audio_bus, src_pos, buffer_fill_end_, num_samples_to_xfer);
+ src_pos += num_samples_to_xfer;
+ buffer_fill_end_ += num_samples_to_xfer;
+
+ if (src_pos == audio_bus->frames()) {
+ cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
+ done_callback);
+ // Note: |audio_bus| is now invalid..
+ }
+
+ if (buffer_fill_end_ == samples_per_10ms_) {
+ scoped_ptr<EncodedAudioFrame> audio_frame(new EncodedAudioFrame());
+ audio_frame->codec = codec_;
+ audio_frame->frame_id = frame_id_++;
+ audio_frame->samples = samples_per_10ms_;
+ if (EncodeFromFilledBuffer(&audio_frame->data)) {
+ // Compute an offset to determine the recorded time for the first
+ // audio sample in the buffer.
+ const base::TimeDelta buffer_time_offset =
+ (buffer_fill_end_ - src_pos) *
+ base::TimeDelta::FromMilliseconds(10) / samples_per_10ms_;
+ // TODO(miu): Consider batching EncodedAudioFrames so we only post a
+ // at most one task for each call to this method.
+ cast_environment_->PostTask(
+ CastEnvironment::MAIN, FROM_HERE,
+ base::Bind(callback_, base::Passed(&audio_frame),
+ recorded_time - buffer_time_offset));
+ }
+ buffer_fill_end_ = 0;
+ }
+ }
}
- private:
+ protected:
+ virtual void TransferSamplesIntoBuffer(const AudioBus* audio_bus,
+ int source_offset,
+ int buffer_fill_offset,
+ int num_samples) = 0;
+ virtual bool EncodeFromFilledBuffer(std::string* out) = 0;
+
+ CastEnvironment* const cast_environment_;
const AudioCodec codec_;
- const int frequency_;
- scoped_refptr<CastThread> cast_thread_;
- uint32 last_timestamp_;
- base::TimeTicks recorded_time_;
- const AudioEncoder::FrameEncodedCallback* frame_encoded_callback_;
+ const int num_channels_;
+ const int samples_per_10ms_;
+ const FrameEncodedCallback callback_;
+
+ private:
+ // In the case where a call to EncodeAudio() cannot completely fill the
+ // buffer, this points to the position at which to populate data in a later
+ // call.
+ int buffer_fill_end_;
+
+ // A counter used to label EncodedAudioFrames.
+ uint32 frame_id_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ImplBase);
+};
+
+class AudioEncoder::OpusImpl : public AudioEncoder::ImplBase {
+ public:
+ OpusImpl(CastEnvironment* cast_environment,
+ int num_channels, int sampling_rate, int bitrate,
+ const FrameEncodedCallback& callback)
+ : ImplBase(cast_environment, kOpus, num_channels, sampling_rate,
+ callback),
+ encoder_memory_(new uint8[opus_encoder_get_size(num_channels)]),
+ opus_encoder_(reinterpret_cast<OpusEncoder*>(encoder_memory_.get())),
+ buffer_(new float[num_channels * samples_per_10ms_]) {
+ CHECK_EQ(opus_encoder_init(opus_encoder_, sampling_rate, num_channels,
+ OPUS_APPLICATION_AUDIO),
+ OPUS_OK);
+ if (bitrate <= 0) {
+ // Note: As of 2013-10-31, the encoder in "auto bitrate" mode would use a
+ // variable bitrate up to 102kbps for 2-channel, 48 kHz audio and a 10 ms
+ // frame size. The opus library authors may, of course, adjust this in
+ // later versions.
+ bitrate = OPUS_AUTO;
+ }
+ CHECK_EQ(opus_encoder_ctl(opus_encoder_, OPUS_SET_BITRATE(bitrate)),
+ OPUS_OK);
+ }
+
+ virtual ~OpusImpl() {}
+
+ private:
+ virtual void TransferSamplesIntoBuffer(const AudioBus* audio_bus,
+ int source_offset,
+ int buffer_fill_offset,
+ int num_samples) OVERRIDE {
+ // Opus requires channel-interleaved samples in a single array.
+ for (int ch = 0; ch < audio_bus->channels(); ++ch) {
+ const float* src = audio_bus->channel(ch) + source_offset;
+ const float* const src_end = src + num_samples;
+ float* dest = buffer_.get() + buffer_fill_offset * num_channels_ + ch;
+ for (; src < src_end; ++src, dest += num_channels_)
+ *dest = *src;
+ }
+ }
+
+ virtual bool EncodeFromFilledBuffer(std::string* out) OVERRIDE {
+ out->resize(kOpusMaxPayloadSize);
+ const opus_int32 result = opus_encode_float(
+ opus_encoder_, buffer_.get(), samples_per_10ms_,
+ reinterpret_cast<uint8*>(&out->at(0)), kOpusMaxPayloadSize);
+ if (result > 1) {
+ out->resize(result);
+ return true;
+ } else if (result < 0) {
+ LOG(ERROR) << "Error code from opus_encode_float(): " << result;
+ return false;
+ } else {
+ // Do nothing: The documentation says that a return value of zero or
+ // one byte means the packet does not need to be transmitted.
+ return false;
+ }
+ }
+
+ const scoped_ptr<uint8[]> encoder_memory_;
+ OpusEncoder* const opus_encoder_;
+ const scoped_ptr<float[]> buffer_;
+
+ // This is the recommended value, according to documentation in
+ // third_party/opus/src/include/opus.h, so that the Opus encoder does not
+ // degrade the audio due to memory constraints.
+ //
+ // Note: Whereas other RTP implementations do not, the cast library is
+ // perfectly capable of transporting larger than MTU-sized audio frames.
+ static const int kOpusMaxPayloadSize = 4000;
+
+ DISALLOW_COPY_AND_ASSIGN(OpusImpl);
};
-AudioEncoder::AudioEncoder(scoped_refptr<CastThread> cast_thread,
- const AudioSenderConfig& audio_config)
- : cast_thread_(cast_thread),
- audio_encoder_(webrtc::AudioCodingModule::Create(0)),
- webrtc_encoder_callback_(
- new WebrtEncodedDataCallback(cast_thread, audio_config.codec,
- audio_config.frequency)),
- timestamp_(0) { // Must start at 0; used above.
-
- if (audio_encoder_->InitializeSender() != 0) {
- DCHECK(false) << "Invalid webrtc return value";
+class AudioEncoder::Pcm16Impl : public AudioEncoder::ImplBase {
+ public:
+ Pcm16Impl(CastEnvironment* cast_environment,
+ int num_channels, int sampling_rate,
+ const FrameEncodedCallback& callback)
+ : ImplBase(cast_environment, kPcm16, num_channels, sampling_rate,
+ callback),
+ buffer_(new int16[num_channels * samples_per_10ms_]) {}
+
+ virtual ~Pcm16Impl() {}
+
+ private:
+ virtual void TransferSamplesIntoBuffer(const AudioBus* audio_bus,
+ int source_offset,
+ int buffer_fill_offset,
+ int num_samples) OVERRIDE {
+ audio_bus->ToInterleavedPartial(
+ source_offset, num_samples, sizeof(int16),
+ buffer_.get() + buffer_fill_offset * num_channels_);
}
- if (audio_encoder_->RegisterTransportCallback(
- webrtc_encoder_callback_.get()) != 0) {
- DCHECK(false) << "Invalid webrtc return value";
+
+ virtual bool EncodeFromFilledBuffer(std::string* out) OVERRIDE {
+ // Output 16-bit PCM integers in big-endian byte order.
+ out->resize(num_channels_ * samples_per_10ms_ * sizeof(int16));
+ const int16* src = buffer_.get();
+ const int16* const src_end = src + num_channels_ * samples_per_10ms_;
+ uint16* dest = reinterpret_cast<uint16*>(&out->at(0));
+ for (; src < src_end; ++src, ++dest)
+ *dest = base::HostToNet16(*src);
+ return true;
}
- webrtc::CodecInst send_codec;
- send_codec.pltype = audio_config.rtp_payload_type;
- send_codec.plfreq = audio_config.frequency;
- send_codec.channels = audio_config.channels;
+
+ private:
+ const scoped_ptr<int16[]> buffer_;
+
+ DISALLOW_COPY_AND_ASSIGN(Pcm16Impl);
+};
+
+AudioEncoder::AudioEncoder(
+ const scoped_refptr<CastEnvironment>& cast_environment,
+ const AudioSenderConfig& audio_config,
+ const FrameEncodedCallback& frame_encoded_callback)
+ : cast_environment_(cast_environment) {
+ // Note: It doesn't matter which thread constructs AudioEncoder, just so long
+ // as all calls to InsertAudio() are by the same thread.
+ insert_thread_checker_.DetachFromThread();
switch (audio_config.codec) {
case kOpus:
- strncpy(send_codec.plname, "opus", sizeof(send_codec.plname));
- send_codec.pacsize = audio_config.frequency / 50; // 20 ms
- send_codec.rate = audio_config.bitrate; // 64000
+ impl_.reset(new OpusImpl(
+ cast_environment, audio_config.channels, audio_config.frequency,
+ audio_config.bitrate, frame_encoded_callback));
break;
case kPcm16:
- strncpy(send_codec.plname, "L16", sizeof(send_codec.plname));
- send_codec.pacsize = audio_config.frequency / 100; // 10 ms
- // TODO(pwestin) bug in webrtc; it should take audio_config.channels into
- // account.
- send_codec.rate = 8 * 2 * audio_config.frequency;
+ impl_.reset(new Pcm16Impl(
+ cast_environment, audio_config.channels, audio_config.frequency,
+ frame_encoded_callback));
break;
default:
- DCHECK(false) << "Codec must be specified for audio encoder";
- return;
- }
- if (audio_encoder_->RegisterSendCodec(send_codec) != 0) {
- DCHECK(false) << "Invalid webrtc return value; failed to register codec";
+ NOTREACHED() << "Unsupported or unspecified codec for audio encoder";
+ break;
}
}
-AudioEncoder::~AudioEncoder() {
- webrtc::AudioCodingModule::Destroy(audio_encoder_);
-}
+AudioEncoder::~AudioEncoder() {}
-// Called from main cast thread.
-void AudioEncoder::InsertRawAudioFrame(
- const PcmAudioFrame* audio_frame,
+void AudioEncoder::InsertAudio(
+ const AudioBus* audio_bus,
const base::TimeTicks& recorded_time,
- const FrameEncodedCallback& frame_encoded_callback,
- const base::Closure release_callback) {
- cast_thread_->PostTask(CastThread::AUDIO_ENCODER, FROM_HERE,
- base::Bind(&AudioEncoder::EncodeAudioFrameThread, this, audio_frame,
- recorded_time, frame_encoded_callback, release_callback));
+ const base::Closure& done_callback) {
+ DCHECK(insert_thread_checker_.CalledOnValidThread());
+ if (!impl_) {
+ NOTREACHED();
+ cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
+ done_callback);
+ return;
+ }
+ cast_environment_->PostTask(CastEnvironment::AUDIO_ENCODER, FROM_HERE,
+ base::Bind(&AudioEncoder::EncodeAudio, this, audio_bus, recorded_time,
+ done_callback));
}
-// Called from cast audio encoder thread.
-void AudioEncoder::EncodeAudioFrameThread(
- const PcmAudioFrame* audio_frame,
+void AudioEncoder::EncodeAudio(
+ const AudioBus* audio_bus,
const base::TimeTicks& recorded_time,
- const FrameEncodedCallback& frame_encoded_callback,
- const base::Closure release_callback) {
- int samples_per_10ms = audio_frame->frequency / 100;
- int number_of_10ms_blocks = audio_frame->samples.size() /
- (samples_per_10ms * audio_frame->channels);
- DCHECK(webrtc::AudioFrame::kMaxDataSizeSamples > samples_per_10ms)
- << "webrtc sanity check failed";
-
- for (int i = 0; i < number_of_10ms_blocks; ++i) {
- webrtc::AudioFrame webrtc_audio_frame;
- webrtc_audio_frame.timestamp_ = timestamp_;
-
- // Due to the webrtc::AudioFrame declaration we need to copy our data into
- // the webrtc structure.
- memcpy(&webrtc_audio_frame.data_[0],
- &audio_frame->samples[i * samples_per_10ms * audio_frame->channels],
- samples_per_10ms * audio_frame->channels * sizeof(int16));
- webrtc_audio_frame.samples_per_channel_ = samples_per_10ms;
- webrtc_audio_frame.sample_rate_hz_ = audio_frame->frequency;
- webrtc_audio_frame.num_channels_ = audio_frame->channels;
-
- // webrtc::AudioCodingModule is thread safe.
- if (audio_encoder_->Add10MsData(webrtc_audio_frame) != 0) {
- DCHECK(false) << "Invalid webrtc return value";
- }
- timestamp_ += samples_per_10ms;
- }
- // We are done with the audio frame release it.
- cast_thread_->PostTask(CastThread::MAIN, FROM_HERE, release_callback);
-
- // Note:
- // Not all insert of 10 ms will generate a callback with encoded data.
- webrtc_encoder_callback_->SetEncodedCallbackInfo(recorded_time,
- &frame_encoded_callback);
- for (int i = 0; i < number_of_10ms_blocks; ++i) {
- audio_encoder_->Process();
- }
+ const base::Closure& done_callback) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::AUDIO_ENCODER));
+ impl_->EncodeAudio(audio_bus, recorded_time, done_callback);
+ cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
+ base::Bind(LogAudioEncodedEvent, cast_environment_, recorded_time));
}
-} // namespace media
} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/audio_sender/audio_encoder.h b/chromium/media/cast/audio_sender/audio_encoder.h
index 8aacb0b4759..4a22d1983bd 100644
--- a/chromium/media/cast/audio_sender/audio_encoder.h
+++ b/chromium/media/cast/audio_sender/audio_encoder.h
@@ -7,52 +7,59 @@
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
+#include "base/threading/thread_checker.h"
#include "media/cast/cast_config.h"
-#include "media/cast/cast_thread.h"
-#include "media/cast/rtp_sender/rtp_sender.h"
+#include "media/cast/cast_environment.h"
-namespace webrtc {
-class AudioCodingModule;
+namespace base {
+class TimeTicks;
}
namespace media {
-namespace cast {
+class AudioBus;
+}
-class WebrtEncodedDataCallback;
+namespace media {
+namespace cast {
-// Thread safe class.
-// It should be called from the main cast thread; however that is not required.
class AudioEncoder : public base::RefCountedThreadSafe<AudioEncoder> {
public:
typedef base::Callback<void(scoped_ptr<EncodedAudioFrame>,
const base::TimeTicks&)> FrameEncodedCallback;
- AudioEncoder(scoped_refptr<CastThread> cast_thread,
- const AudioSenderConfig& audio_config);
+ AudioEncoder(const scoped_refptr<CastEnvironment>& cast_environment,
+ const AudioSenderConfig& audio_config,
+ const FrameEncodedCallback& frame_encoded_callback);
- virtual ~AudioEncoder();
+ // The |audio_bus| must be valid until the |done_callback| is called.
+ // The callback is called from the main cast thread as soon as the encoder is
+ // done with |audio_bus|; it does not mean that the encoded data has been
+ // sent out.
+ void InsertAudio(const AudioBus* audio_bus,
+ const base::TimeTicks& recorded_time,
+ const base::Closure& done_callback);
- // The audio_frame must be valid until the closure callback is called.
- // The closure callback is called from the main cast thread as soon as
- // the encoder is done with the frame; it does not mean that the encoded frame
- // has been sent out.
- void InsertRawAudioFrame(const PcmAudioFrame* audio_frame,
- const base::TimeTicks& recorded_time,
- const FrameEncodedCallback& frame_encoded_callback,
- const base::Closure callback);
+ protected:
+ virtual ~AudioEncoder();
private:
- void EncodeAudioFrameThread(
- const PcmAudioFrame* audio_frame,
- const base::TimeTicks& recorded_time,
- const FrameEncodedCallback& frame_encoded_callback,
- const base::Closure release_callback);
-
- scoped_refptr<CastThread> cast_thread_;
- // Can't use scoped_ptr due to protected constructor within webrtc.
- webrtc::AudioCodingModule* audio_encoder_;
- scoped_ptr<WebrtEncodedDataCallback> webrtc_encoder_callback_;
- uint32 timestamp_;
+ friend class base::RefCountedThreadSafe<AudioEncoder>;
+
+ class ImplBase;
+ class OpusImpl;
+ class Pcm16Impl;
+
+ // Invokes |impl_|'s encode method on the AUDIO_ENCODER thread while holding
+ // a ref-count on AudioEncoder.
+ void EncodeAudio(const AudioBus* audio_bus,
+ const base::TimeTicks& recorded_time,
+ const base::Closure& done_callback);
+
+ const scoped_refptr<CastEnvironment> cast_environment_;
+ scoped_ptr<ImplBase> impl_;
+
+ // Used to ensure only one thread invokes InsertAudio().
+ base::ThreadChecker insert_thread_checker_;
DISALLOW_COPY_AND_ASSIGN(AudioEncoder);
};
diff --git a/chromium/media/cast/audio_sender/audio_encoder_unittest.cc b/chromium/media/cast/audio_sender/audio_encoder_unittest.cc
index 0b17f980569..d721f71ef29 100644
--- a/chromium/media/cast/audio_sender/audio_encoder_unittest.cc
+++ b/chromium/media/cast/audio_sender/audio_encoder_unittest.cc
@@ -2,69 +2,233 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <sstream>
+#include <string>
+
#include "base/bind.h"
+#include "base/bind_helpers.h"
#include "base/memory/scoped_ptr.h"
+#include "media/base/audio_bus.h"
+#include "media/base/media.h"
#include "media/cast/audio_sender/audio_encoder.h"
#include "media/cast/cast_config.h"
-#include "media/cast/cast_thread.h"
+#include "media/cast/cast_environment.h"
+#include "media/cast/test/audio_utility.h"
#include "media/cast/test/fake_task_runner.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
namespace cast {
-static const int64 kStartMillisecond = 123456789;
+static const int64 kStartMillisecond = GG_INT64_C(12345678900000);
-static void RelaseFrame(const PcmAudioFrame* frame) {
- delete frame;
-}
+namespace {
-static void FrameEncoded(scoped_ptr<EncodedAudioFrame> encoded_frame,
- const base::TimeTicks& recorded_time) {
-}
+class TestEncodedAudioFrameReceiver {
+ public:
+ explicit TestEncodedAudioFrameReceiver(AudioCodec codec) :
+ codec_(codec), frames_received_(0) {}
+ virtual ~TestEncodedAudioFrameReceiver() {}
+
+ int frames_received() const {
+ return frames_received_;
+ }
+
+ void SetRecordedTimeLowerBound(const base::TimeTicks& t) {
+ lower_bound_ = t;
+ }
+
+ void SetRecordedTimeUpperBound(const base::TimeTicks& t) {
+ upper_bound_ = t;
+ }
+
+ void FrameEncoded(scoped_ptr<EncodedAudioFrame> encoded_frame,
+ const base::TimeTicks& recorded_time) {
+ EXPECT_EQ(codec_, encoded_frame->codec);
+ EXPECT_EQ(static_cast<uint8>(frames_received_ & 0xff),
+ encoded_frame->frame_id);
+ EXPECT_LT(0, encoded_frame->samples);
+ EXPECT_TRUE(!encoded_frame->data.empty());
+
+ EXPECT_LE(lower_bound_, recorded_time);
+ lower_bound_ = recorded_time;
+ EXPECT_GT(upper_bound_, recorded_time);
+
+ ++frames_received_;
+ }
+
+ private:
+ const AudioCodec codec_;
+ int frames_received_;
+ base::TimeTicks lower_bound_;
+ base::TimeTicks upper_bound_;
+
+ DISALLOW_COPY_AND_ASSIGN(TestEncodedAudioFrameReceiver);
+};
+
+struct TestScenario {
+ const int64* durations_in_ms;
+ size_t num_durations;
+
+ TestScenario(const int64* d, size_t n)
+ : durations_in_ms(d), num_durations(n) {}
+
+ std::string ToString() const {
+ std::ostringstream out;
+ for (size_t i = 0; i < num_durations; ++i) {
+ if (i > 0)
+ out << ", ";
+ out << durations_in_ms[i];
+ }
+ return out.str();
+ }
+};
+
+} // namespace
-class AudioEncoderTest : public ::testing::Test {
- protected:
+class AudioEncoderTest : public ::testing::TestWithParam<TestScenario> {
+ public:
AudioEncoderTest() {
+ InitializeMediaLibraryForTesting();
testing_clock_.Advance(
base::TimeDelta::FromMilliseconds(kStartMillisecond));
}
virtual void SetUp() {
task_runner_ = new test::FakeTaskRunner(&testing_clock_);
- cast_thread_ = new CastThread(task_runner_, task_runner_, task_runner_,
- task_runner_, task_runner_);
+ cast_environment_ = new CastEnvironment(&testing_clock_, task_runner_,
+ task_runner_, task_runner_, task_runner_, task_runner_,
+ GetDefaultCastLoggingConfig());
+ }
+
+ virtual ~AudioEncoderTest() {}
+
+ void RunTestForCodec(AudioCodec codec) {
+ const TestScenario& scenario = GetParam();
+ SCOPED_TRACE(::testing::Message()
+ << "Durations: " << scenario.ToString());
+
+ CreateObjectsForCodec(codec);
+
+ receiver_->SetRecordedTimeLowerBound(testing_clock_.NowTicks());
+ for (size_t i = 0; i < scenario.num_durations; ++i) {
+ const base::TimeDelta duration =
+ base::TimeDelta::FromMilliseconds(scenario.durations_in_ms[i]);
+ receiver_->SetRecordedTimeUpperBound(
+ testing_clock_.NowTicks() + duration);
+
+ const scoped_ptr<AudioBus> bus(
+ audio_bus_factory_->NextAudioBus(duration));
+
+ const int last_count = release_callback_count_;
+ audio_encoder_->InsertAudio(
+ bus.get(), testing_clock_.NowTicks(),
+ base::Bind(&AudioEncoderTest::IncrementReleaseCallbackCounter,
+ base::Unretained(this)));
+ task_runner_->RunTasks();
+ EXPECT_EQ(1, release_callback_count_ - last_count)
+ << "Release callback was not invoked once.";
+
+ testing_clock_.Advance(duration);
+ }
+
+ DVLOG(1) << "Received " << receiver_->frames_received()
+ << " frames for this test run: " << scenario.ToString();
+ }
+
+ private:
+ void CreateObjectsForCodec(AudioCodec codec) {
AudioSenderConfig audio_config;
- audio_config.codec = kOpus;
+ audio_config.codec = codec;
audio_config.use_external_encoder = false;
- audio_config.frequency = 48000;
+ audio_config.frequency = kDefaultAudioSamplingRate;
audio_config.channels = 2;
- audio_config.bitrate = 64000;
+ audio_config.bitrate = kDefaultAudioEncoderBitrate;
audio_config.rtp_payload_type = 127;
- audio_encoder_ = new AudioEncoder(cast_thread_, audio_config);
+ audio_bus_factory_.reset(new TestAudioBusFactory(
+ audio_config.channels, audio_config.frequency,
+ TestAudioBusFactory::kMiddleANoteFreq, 0.5f));
+
+ receiver_.reset(new TestEncodedAudioFrameReceiver(codec));
+
+ audio_encoder_ = new AudioEncoder(
+ cast_environment_, audio_config,
+ base::Bind(&TestEncodedAudioFrameReceiver::FrameEncoded,
+ base::Unretained(receiver_.get())));
+ release_callback_count_ = 0;
}
- ~AudioEncoderTest() {}
+ void IncrementReleaseCallbackCounter() {
+ ++release_callback_count_;
+ }
base::SimpleTestTickClock testing_clock_;
scoped_refptr<test::FakeTaskRunner> task_runner_;
+ scoped_ptr<TestAudioBusFactory> audio_bus_factory_;
+ scoped_ptr<TestEncodedAudioFrameReceiver> receiver_;
scoped_refptr<AudioEncoder> audio_encoder_;
- scoped_refptr<CastThread> cast_thread_;
+ scoped_refptr<CastEnvironment> cast_environment_;
+ int release_callback_count_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioEncoderTest);
};
-TEST_F(AudioEncoderTest, Encode20ms) {
- PcmAudioFrame* audio_frame = new PcmAudioFrame();
- audio_frame->channels = 2;
- audio_frame->frequency = 48000;
- audio_frame->samples.insert(audio_frame->samples.begin(), 480 * 2 * 2, 123);
-
- base::TimeTicks recorded_time;
- audio_encoder_->InsertRawAudioFrame(audio_frame, recorded_time,
- base::Bind(&FrameEncoded),
- base::Bind(&RelaseFrame, audio_frame));
- task_runner_->RunTasks();
+TEST_P(AudioEncoderTest, EncodeOpus) {
+ RunTestForCodec(kOpus);
+}
+
+TEST_P(AudioEncoderTest, EncodePcm16) {
+ RunTestForCodec(kPcm16);
}
+static const int64 kOneCall_3Millis[] = { 3 };
+static const int64 kOneCall_10Millis[] = { 10 };
+static const int64 kOneCall_13Millis[] = { 13 };
+static const int64 kOneCall_20Millis[] = { 20 };
+
+static const int64 kTwoCalls_3Millis[] = { 3, 3 };
+static const int64 kTwoCalls_10Millis[] = { 10, 10 };
+static const int64 kTwoCalls_Mixed1[] = { 3, 10 };
+static const int64 kTwoCalls_Mixed2[] = { 10, 3 };
+static const int64 kTwoCalls_Mixed3[] = { 3, 17 };
+static const int64 kTwoCalls_Mixed4[] = { 17, 3 };
+
+static const int64 kManyCalls_3Millis[] =
+ { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 };
+static const int64 kManyCalls_10Millis[] =
+ { 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10 };
+static const int64 kManyCalls_Mixed1[] =
+ { 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10 };
+static const int64 kManyCalls_Mixed2[] =
+ { 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3 };
+static const int64 kManyCalls_Mixed3[] =
+ { 3, 1, 4, 1, 5, 9, 2, 6, 5, 3, 5, 8, 9, 7, 9, 3, 2, 3, 8, 4, 6, 2, 6, 4 };
+static const int64 kManyCalls_Mixed4[] =
+ { 31, 4, 15, 9, 26, 53, 5, 8, 9, 7, 9, 32, 38, 4, 62, 64, 3 };
+static const int64 kManyCalls_Mixed5[] =
+ { 3, 14, 15, 9, 26, 53, 58, 9, 7, 9, 3, 23, 8, 4, 6, 2, 6, 43 };
+
+INSTANTIATE_TEST_CASE_P(
+ AudioEncoderTestScenarios, AudioEncoderTest,
+ ::testing::Values(
+ TestScenario(kOneCall_3Millis, arraysize(kOneCall_3Millis)),
+ TestScenario(kOneCall_10Millis, arraysize(kOneCall_10Millis)),
+ TestScenario(kOneCall_13Millis, arraysize(kOneCall_13Millis)),
+ TestScenario(kOneCall_20Millis, arraysize(kOneCall_20Millis)),
+ TestScenario(kTwoCalls_3Millis, arraysize(kTwoCalls_3Millis)),
+ TestScenario(kTwoCalls_10Millis, arraysize(kTwoCalls_10Millis)),
+ TestScenario(kTwoCalls_Mixed1, arraysize(kTwoCalls_Mixed1)),
+ TestScenario(kTwoCalls_Mixed2, arraysize(kTwoCalls_Mixed2)),
+ TestScenario(kTwoCalls_Mixed3, arraysize(kTwoCalls_Mixed3)),
+ TestScenario(kTwoCalls_Mixed4, arraysize(kTwoCalls_Mixed4)),
+ TestScenario(kManyCalls_3Millis, arraysize(kManyCalls_3Millis)),
+ TestScenario(kManyCalls_10Millis, arraysize(kManyCalls_10Millis)),
+ TestScenario(kManyCalls_Mixed1, arraysize(kManyCalls_Mixed1)),
+ TestScenario(kManyCalls_Mixed2, arraysize(kManyCalls_Mixed2)),
+ TestScenario(kManyCalls_Mixed3, arraysize(kManyCalls_Mixed3)),
+ TestScenario(kManyCalls_Mixed4, arraysize(kManyCalls_Mixed4)),
+ TestScenario(kManyCalls_Mixed5, arraysize(kManyCalls_Mixed5))));
+
} // namespace cast
} // namespace media
diff --git a/chromium/media/cast/audio_sender/audio_sender.cc b/chromium/media/cast/audio_sender/audio_sender.cc
index 39fccda6370..b1b177d3ec3 100644
--- a/chromium/media/cast/audio_sender/audio_sender.cc
+++ b/chromium/media/cast/audio_sender/audio_sender.cc
@@ -7,9 +7,12 @@
#include "base/bind.h"
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
+#include "crypto/encryptor.h"
+#include "crypto/symmetric_key.h"
#include "media/cast/audio_sender/audio_encoder.h"
+#include "media/cast/cast_environment.h"
+#include "media/cast/net/rtp_sender/rtp_sender.h"
#include "media/cast/rtcp/rtcp.h"
-#include "media/cast/rtp_sender/rtp_sender.h"
namespace media {
namespace cast {
@@ -22,34 +25,6 @@ class LocalRtcpAudioSenderFeedback : public RtcpSenderFeedback {
: audio_sender_(audio_sender) {
}
- virtual void OnReceivedSendReportRequest() OVERRIDE {
- DCHECK(false) << "Invalid callback";
- }
-
- virtual void OnReceivedReportBlock(
- const RtcpReportBlock& report_block) OVERRIDE {
- DCHECK(false) << "Invalid callback";
- }
-
- virtual void OnReceivedIntraFrameRequest() OVERRIDE {
- DCHECK(false) << "Invalid callback";
- }
-
-
- virtual void OnReceivedRpsi(uint8 payload_type,
- uint64 picture_id) OVERRIDE {
- DCHECK(false) << "Invalid callback";
- }
-
- virtual void OnReceivedRemb(uint32 bitrate) OVERRIDE {
- DCHECK(false) << "Invalid callback";
- }
-
- virtual void OnReceivedNackRequest(
- const std::list<uint16>& nack_sequence_numbers) OVERRIDE {
- DCHECK(false) << "Invalid callback";
- }
-
virtual void OnReceivedCastFeedback(
const RtcpCastMessage& cast_feedback) OVERRIDE {
if (!cast_feedback.missing_frames_and_packets_.empty()) {
@@ -78,89 +53,156 @@ class LocalRtpSenderStatistics : public RtpSenderStatistics {
RtpSender* rtp_sender_;
};
-AudioSender::AudioSender(scoped_refptr<CastThread> cast_thread,
+AudioSender::AudioSender(scoped_refptr<CastEnvironment> cast_environment,
const AudioSenderConfig& audio_config,
PacedPacketSender* const paced_packet_sender)
- : incoming_feedback_ssrc_(audio_config.incoming_feedback_ssrc),
- cast_thread_(cast_thread),
- rtp_sender_(&audio_config, NULL, paced_packet_sender),
+ : cast_environment_(cast_environment),
+ rtp_sender_(cast_environment, &audio_config, NULL,
+ paced_packet_sender),
rtcp_feedback_(new LocalRtcpAudioSenderFeedback(this)),
rtp_audio_sender_statistics_(
new LocalRtpSenderStatistics(&rtp_sender_)),
- rtcp_(rtcp_feedback_.get(),
+ rtcp_(cast_environment,
+ rtcp_feedback_.get(),
paced_packet_sender,
rtp_audio_sender_statistics_.get(),
NULL,
audio_config.rtcp_mode,
base::TimeDelta::FromMilliseconds(audio_config.rtcp_interval),
- true,
audio_config.sender_ssrc,
+ audio_config.incoming_feedback_ssrc,
audio_config.rtcp_c_name),
- clock_(&default_tick_clock_),
+ initialized_(false),
weak_factory_(this) {
-
- rtcp_.SetRemoteSSRC(audio_config.incoming_feedback_ssrc);
-
+ if (audio_config.aes_iv_mask.size() == kAesKeySize &&
+ audio_config.aes_key.size() == kAesKeySize) {
+ iv_mask_ = audio_config.aes_iv_mask;
+ crypto::SymmetricKey* key = crypto::SymmetricKey::Import(
+ crypto::SymmetricKey::AES, audio_config.aes_key);
+ encryptor_.reset(new crypto::Encryptor());
+ encryptor_->Init(key, crypto::Encryptor::CTR, std::string());
+ } else if (audio_config.aes_iv_mask.size() != 0 ||
+ audio_config.aes_key.size() != 0) {
+ DCHECK(false) << "Invalid crypto configuration";
+ }
if (!audio_config.use_external_encoder) {
- audio_encoder_ = new AudioEncoder(cast_thread, audio_config);
+ audio_encoder_ = new AudioEncoder(
+ cast_environment, audio_config,
+ base::Bind(&AudioSender::SendEncodedAudioFrame,
+ weak_factory_.GetWeakPtr()));
}
- ScheduleNextRtcpReport();
}
AudioSender::~AudioSender() {}
-void AudioSender::InsertRawAudioFrame(
- const PcmAudioFrame* audio_frame,
- const base::TimeTicks& recorded_time,
- const base::Closure callback) {
- DCHECK(audio_encoder_.get()) << "Invalid internal state";
-
+void AudioSender::InitializeTimers() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ if (!initialized_) {
+ initialized_ = true;
+ ScheduleNextRtcpReport();
+ }
+}
- audio_encoder_->InsertRawAudioFrame(audio_frame, recorded_time,
- base::Bind(&AudioSender::SendEncodedAudioFrame,
- weak_factory_.GetWeakPtr()),
- callback);
+void AudioSender::InsertAudio(const AudioBus* audio_bus,
+ const base::TimeTicks& recorded_time,
+ const base::Closure& done_callback) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ DCHECK(audio_encoder_.get()) << "Invalid internal state";
+ // TODO(mikhal): Resolve calculation of the audio rtp_timestamp for logging.
+ // This is a tmp solution to allow the code to build.
+ cast_environment_->Logging()->InsertFrameEvent(kAudioFrameReceived,
+ GetVideoRtpTimestamp(recorded_time), kFrameIdUnknown);
+ audio_encoder_->InsertAudio(audio_bus, recorded_time, done_callback);
}
void AudioSender::InsertCodedAudioFrame(const EncodedAudioFrame* audio_frame,
const base::TimeTicks& recorded_time,
const base::Closure callback) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
DCHECK(audio_encoder_.get() == NULL) << "Invalid internal state";
- rtp_sender_.IncomingEncodedAudioFrame(audio_frame, recorded_time);
+
+ cast_environment_->Logging()->InsertFrameEvent(kAudioFrameReceived,
+ GetVideoRtpTimestamp(recorded_time), kFrameIdUnknown);
+
+ if (encryptor_) {
+ EncodedAudioFrame encrypted_frame;
+ if (!EncryptAudioFrame(*audio_frame, &encrypted_frame)) {
+ // Logging already done.
+ return;
+ }
+ rtp_sender_.IncomingEncodedAudioFrame(&encrypted_frame, recorded_time);
+ } else {
+ rtp_sender_.IncomingEncodedAudioFrame(audio_frame, recorded_time);
+ }
callback.Run();
}
void AudioSender::SendEncodedAudioFrame(
scoped_ptr<EncodedAudioFrame> audio_frame,
const base::TimeTicks& recorded_time) {
- rtp_sender_.IncomingEncodedAudioFrame(audio_frame.get(), recorded_time);
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ InitializeTimers();
+ if (encryptor_) {
+ EncodedAudioFrame encrypted_frame;
+ if (!EncryptAudioFrame(*audio_frame.get(), &encrypted_frame)) {
+ // Logging already done.
+ return;
+ }
+ rtp_sender_.IncomingEncodedAudioFrame(&encrypted_frame, recorded_time);
+ } else {
+ rtp_sender_.IncomingEncodedAudioFrame(audio_frame.get(), recorded_time);
+ }
+}
+
+bool AudioSender::EncryptAudioFrame(const EncodedAudioFrame& audio_frame,
+ EncodedAudioFrame* encrypted_frame) {
+ DCHECK(encryptor_) << "Invalid state";
+
+ if (!encryptor_->SetCounter(GetAesNonce(audio_frame.frame_id, iv_mask_))) {
+ NOTREACHED() << "Failed to set counter";
+ return false;
+ }
+ if (!encryptor_->Encrypt(audio_frame.data, &encrypted_frame->data)) {
+ NOTREACHED() << "Encrypt error";
+ return false;
+ }
+ encrypted_frame->codec = audio_frame.codec;
+ encrypted_frame->frame_id = audio_frame.frame_id;
+ encrypted_frame->samples = audio_frame.samples;
+ return true;
}
void AudioSender::ResendPackets(
- const MissingFramesAndPacketsMap& missing_frames_and_packets) {
+ const MissingFramesAndPacketsMap& missing_frames_and_packets) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
rtp_sender_.ResendPackets(missing_frames_and_packets);
}
-void AudioSender::IncomingRtcpPacket(const uint8* packet, int length,
+void AudioSender::IncomingRtcpPacket(const uint8* packet, size_t length,
const base::Closure callback) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
rtcp_.IncomingRtcpPacket(packet, length);
- cast_thread_->PostTask(CastThread::MAIN, FROM_HERE, callback);
+ cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE, callback);
}
void AudioSender::ScheduleNextRtcpReport() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
base::TimeDelta time_to_next =
- rtcp_.TimeToSendNextRtcpReport() - clock_->NowTicks();
+ rtcp_.TimeToSendNextRtcpReport() - cast_environment_->Clock()->NowTicks();
time_to_next = std::max(time_to_next,
base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs));
- cast_thread_->PostDelayedTask(CastThread::MAIN, FROM_HERE,
+ cast_environment_->PostDelayedTask(CastEnvironment::MAIN, FROM_HERE,
base::Bind(&AudioSender::SendRtcpReport, weak_factory_.GetWeakPtr()),
time_to_next);
}
void AudioSender::SendRtcpReport() {
- rtcp_.SendRtcpReport(incoming_feedback_ssrc_);
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ // We don't send audio logging messages since all captured audio frames will
+ // be sent.
+ rtcp_.SendRtcpFromRtpSender(NULL);
ScheduleNextRtcpReport();
}
diff --git a/chromium/media/cast/audio_sender/audio_sender.gypi b/chromium/media/cast/audio_sender/audio_sender.gypi
index 3e2a56345b8..9d84b79af8d 100644
--- a/chromium/media/cast/audio_sender/audio_sender.gypi
+++ b/chromium/media/cast/audio_sender/audio_sender.gypi
@@ -10,7 +10,6 @@
'include_dirs': [
'<(DEPTH)/',
'<(DEPTH)/third_party/',
- '<(DEPTH)/third_party/webrtc',
],
'sources': [
'audio_encoder.h',
@@ -19,9 +18,12 @@
'audio_sender.cc',
], # source
'dependencies': [
+ '<(DEPTH)/crypto/crypto.gyp:crypto',
+ '<(DEPTH)/media/media.gyp:media',
+ '<(DEPTH)/media/media.gyp:shared_memory_support',
'<(DEPTH)/media/cast/rtcp/rtcp.gyp:cast_rtcp',
- '<(DEPTH)/media/cast/rtp_sender/rtp_sender.gyp:*',
- '<(DEPTH)/third_party/webrtc/webrtc.gyp:webrtc',
+ '<(DEPTH)/media/cast/net/rtp_sender/rtp_sender.gyp:*',
+ '<(DEPTH)/third_party/opus/opus.gyp:opus',
],
},
],
diff --git a/chromium/media/cast/audio_sender/audio_sender.h b/chromium/media/cast/audio_sender/audio_sender.h
index 3d389b381f0..68f9e7a4172 100644
--- a/chromium/media/cast/audio_sender/audio_sender.h
+++ b/chromium/media/cast/audio_sender/audio_sender.h
@@ -10,13 +10,20 @@
#include "base/memory/scoped_ptr.h"
#include "base/memory/weak_ptr.h"
#include "base/threading/non_thread_safe.h"
-#include "base/time/default_tick_clock.h"
#include "base/time/tick_clock.h"
#include "base/time/time.h"
#include "media/cast/cast_config.h"
-#include "media/cast/cast_thread.h"
+#include "media/cast/cast_environment.h"
+#include "media/cast/net/rtp_sender/rtp_sender.h"
#include "media/cast/rtcp/rtcp.h"
-#include "media/cast/rtp_sender/rtp_sender.h"
+
+namespace crypto {
+ class Encryptor;
+}
+
+namespace media {
+class AudioBus;
+}
namespace media {
namespace cast {
@@ -31,19 +38,19 @@ class PacedPacketSender;
class AudioSender : public base::NonThreadSafe,
public base::SupportsWeakPtr<AudioSender> {
public:
- AudioSender(scoped_refptr<CastThread> cast_thread,
+ AudioSender(scoped_refptr<CastEnvironment> cast_environment,
const AudioSenderConfig& audio_config,
PacedPacketSender* const paced_packet_sender);
virtual ~AudioSender();
- // The audio_frame must be valid until the closure callback is called.
- // The closure callback is called from the main cast thread as soon as
- // the encoder is done with the frame; it does not mean that the encoded frame
- // has been sent out.
- void InsertRawAudioFrame(const PcmAudioFrame* audio_frame,
- const base::TimeTicks& recorded_time,
- const base::Closure callback);
+ // The |audio_bus| must be valid until the |done_callback| is called.
+ // The callback is called from the main cast thread as soon as the encoder is
+ // done with |audio_bus|; it does not mean that the encoded data has been
+ // sent out.
+ void InsertAudio(const AudioBus* audio_bus,
+ const base::TimeTicks& recorded_time,
+ const base::Closure& done_callback);
// The audio_frame must be valid until the closure callback is called.
// The closure callback is called from the main cast thread as soon as
@@ -54,16 +61,9 @@ class AudioSender : public base::NonThreadSafe,
const base::Closure callback);
// Only called from the main cast thread.
- void IncomingRtcpPacket(const uint8* packet, int length,
+ void IncomingRtcpPacket(const uint8* packet, size_t length,
const base::Closure callback);
- // Only used for testing.
- void set_clock(base::TickClock* clock) {
- clock_ = clock;
- rtcp_.set_clock(clock);
- rtp_sender_.set_clock(clock);
- }
-
protected:
void SendEncodedAudioFrame(scoped_ptr<EncodedAudioFrame> audio_frame,
const base::TimeTicks& recorded_time);
@@ -74,21 +74,27 @@ class AudioSender : public base::NonThreadSafe,
void ResendPackets(
const MissingFramesAndPacketsMap& missing_frames_and_packets);
+ // Caller must allocate the destination |encrypted_frame|. The data member
+ // will be resized to hold the encrypted size.
+ bool EncryptAudioFrame(const EncodedAudioFrame& audio_frame,
+ EncodedAudioFrame* encrypted_frame);
+
void ScheduleNextRtcpReport();
void SendRtcpReport();
- base::DefaultTickClock default_tick_clock_;
- base::TickClock* clock_;
+ void InitializeTimers();
base::WeakPtrFactory<AudioSender> weak_factory_;
- const uint32 incoming_feedback_ssrc_;
- scoped_refptr<CastThread> cast_thread_;
+ scoped_refptr<CastEnvironment> cast_environment_;
scoped_refptr<AudioEncoder> audio_encoder_;
RtpSender rtp_sender_;
scoped_ptr<LocalRtpSenderStatistics> rtp_audio_sender_statistics_;
scoped_ptr<LocalRtcpAudioSenderFeedback> rtcp_feedback_;
Rtcp rtcp_;
+ bool initialized_;
+ scoped_ptr<crypto::Encryptor> encryptor_;
+ std::string iv_mask_;
DISALLOW_COPY_AND_ASSIGN(AudioSender);
};
@@ -97,4 +103,3 @@ class AudioSender : public base::NonThreadSafe,
} // namespace media
#endif // MEDIA_CAST_AUDIO_SENDER_H_
-
diff --git a/chromium/media/cast/audio_sender/audio_sender_unittest.cc b/chromium/media/cast/audio_sender/audio_sender_unittest.cc
index 0b5e2176519..65c2e622d8f 100644
--- a/chromium/media/cast/audio_sender/audio_sender_unittest.cc
+++ b/chromium/media/cast/audio_sender/audio_sender_unittest.cc
@@ -3,77 +3,94 @@
// found in the LICENSE file.
#include "base/bind.h"
+#include "base/bind_helpers.h"
#include "base/memory/scoped_ptr.h"
#include "base/test/simple_test_tick_clock.h"
+#include "media/base/media.h"
#include "media/cast/audio_sender/audio_sender.h"
#include "media/cast/cast_config.h"
-#include "media/cast/cast_thread.h"
-#include "media/cast/pacing/mock_paced_packet_sender.h"
+#include "media/cast/cast_environment.h"
+#include "media/cast/net/pacing/mock_paced_packet_sender.h"
+#include "media/cast/test/audio_utility.h"
#include "media/cast/test/fake_task_runner.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
namespace cast {
-static const int64 kStartMillisecond = 123456789;
+static const int64 kStartMillisecond = GG_INT64_C(12345678900000);
using testing::_;
-
-static void RelaseFrame(const PcmAudioFrame* frame) {
- delete frame;
-}
+using testing::AtLeast;
class AudioSenderTest : public ::testing::Test {
protected:
AudioSenderTest() {
+ InitializeMediaLibraryForTesting();
testing_clock_.Advance(
base::TimeDelta::FromMilliseconds(kStartMillisecond));
}
virtual void SetUp() {
task_runner_ = new test::FakeTaskRunner(&testing_clock_);
- cast_thread_ = new CastThread(task_runner_, task_runner_, task_runner_,
- task_runner_, task_runner_);
- AudioSenderConfig audio_config;
- audio_config.codec = kOpus;
- audio_config.use_external_encoder = false;
- audio_config.frequency = 48000;
- audio_config.channels = 2;
- audio_config.bitrate = 64000;
- audio_config.rtp_payload_type = 127;
+ cast_environment_ = new CastEnvironment(&testing_clock_, task_runner_,
+ task_runner_, task_runner_, task_runner_, task_runner_,
+ GetDefaultCastLoggingConfig());
+ audio_config_.codec = kOpus;
+ audio_config_.use_external_encoder = false;
+ audio_config_.frequency = kDefaultAudioSamplingRate;
+ audio_config_.channels = 2;
+ audio_config_.bitrate = kDefaultAudioEncoderBitrate;
+ audio_config_.rtp_payload_type = 127;
audio_sender_.reset(
- new AudioSender(cast_thread_, audio_config, &mock_transport_));
- audio_sender_->set_clock(&testing_clock_);
+ new AudioSender(cast_environment_, audio_config_, &mock_transport_));
}
- ~AudioSenderTest() {}
+ virtual ~AudioSenderTest() {}
base::SimpleTestTickClock testing_clock_;
MockPacedPacketSender mock_transport_;
scoped_refptr<test::FakeTaskRunner> task_runner_;
scoped_ptr<AudioSender> audio_sender_;
- scoped_refptr<CastThread> cast_thread_;
+ scoped_refptr<CastEnvironment> cast_environment_;
+ AudioSenderConfig audio_config_;
};
TEST_F(AudioSenderTest, Encode20ms) {
- EXPECT_CALL(mock_transport_, SendPacket(_, _)).Times(1);
-
- PcmAudioFrame* audio_frame = new PcmAudioFrame();
- audio_frame->channels = 2;
- audio_frame->frequency = 48000;
- audio_frame->samples.insert(audio_frame->samples.begin(), 480 * 2 * 2, 123);
-
- base::TimeTicks recorded_time;
- audio_sender_->InsertRawAudioFrame(audio_frame, recorded_time,
- base::Bind(&RelaseFrame, audio_frame));
-
+ EXPECT_CALL(mock_transport_, SendPackets(_)).Times(AtLeast(1));
+
+ const base::TimeDelta kDuration = base::TimeDelta::FromMilliseconds(20);
+ scoped_ptr<AudioBus> bus(TestAudioBusFactory(
+ audio_config_.channels, audio_config_.frequency,
+ TestAudioBusFactory::kMiddleANoteFreq, 0.5f).NextAudioBus(kDuration));
+
+ base::TimeTicks recorded_time = base::TimeTicks::Now();
+ audio_sender_->InsertAudio(
+ bus.get(), recorded_time,
+ base::Bind(base::IgnoreResult(&scoped_ptr<AudioBus>::release),
+ base::Unretained(&bus)));
task_runner_->RunTasks();
+
+ EXPECT_TRUE(!bus) << "AudioBus wasn't released after use.";
}
TEST_F(AudioSenderTest, RtcpTimer) {
+ EXPECT_CALL(mock_transport_, SendPackets(_)).Times(AtLeast(1));
EXPECT_CALL(mock_transport_, SendRtcpPacket(_)).Times(1);
+ const base::TimeDelta kDuration = base::TimeDelta::FromMilliseconds(20);
+ scoped_ptr<AudioBus> bus(TestAudioBusFactory(
+ audio_config_.channels, audio_config_.frequency,
+ TestAudioBusFactory::kMiddleANoteFreq, 0.5f).NextAudioBus(kDuration));
+
+ base::TimeTicks recorded_time = base::TimeTicks::Now();
+ audio_sender_->InsertAudio(
+ bus.get(), recorded_time,
+ base::Bind(base::IgnoreResult(&scoped_ptr<AudioBus>::release),
+ base::Unretained(&bus)));
+ task_runner_->RunTasks();
+
// Make sure that we send at least one RTCP packet.
base::TimeDelta max_rtcp_timeout =
base::TimeDelta::FromMilliseconds(1 + kDefaultRtcpIntervalMs * 3 / 2);
diff --git a/chromium/media/cast/cast.gyp b/chromium/media/cast/cast.gyp
index 230a2e1d0bd..702272fb289 100644
--- a/chromium/media/cast/cast.gyp
+++ b/chromium/media/cast/cast.gyp
@@ -5,6 +5,7 @@
{
'variables': {
'include_tests%': 1,
+ 'chromium_code': 1,
},
'targets': [
{
@@ -13,41 +14,40 @@
'include_dirs': [
'<(DEPTH)/',
],
+ 'dependencies': [
+ '<(DEPTH)/base/base.gyp:base',
+ ],
'sources': [
'cast_config.cc',
'cast_config.h',
- 'cast_thread.cc',
- 'cast_thread.h',
+ 'cast_defines.h',
+ 'cast_environment.cc',
+ 'cast_environment.h',
+ 'logging/logging_defines.cc',
+ 'logging/logging_defines.h',
+ 'logging/logging_impl.cc',
+ 'logging/logging_impl.h',
+ 'logging/logging_raw.cc',
+ 'logging/logging_raw.h',
+ 'logging/logging_stats.cc',
+ 'logging/logging_stats.h',
], # source
},
- {
- 'target_name': 'cast_sender',
- 'type': 'static_library',
- 'dependencies': [
- 'cast_config',
- 'cast_sender.gyp:cast_sender_impl',
- ],
- },
- {
- 'target_name': 'cast_receiver',
- 'type': 'static_library',
- 'dependencies': [
- 'cast_config',
- 'cast_receiver.gyp:cast_receiver_impl',
- ],
- },
], # targets,
'conditions': [
['include_tests==1', {
'targets': [
{
- 'target_name': 'cast_unittest',
+ 'target_name': 'cast_unittests',
'type': '<(gtest_target_type)',
'dependencies': [
- 'cast_sender',
- 'cast_receiver',
- 'rtcp/rtcp.gyp:cast_rtcp_test',
+ 'cast_config',
+ 'cast_receiver.gyp:cast_receiver',
+ 'cast_sender.gyp:cast_sender',
+ 'test/utility/utility.gyp:cast_test_utility',
'<(DEPTH)/base/base.gyp:run_all_unittests',
+ '<(DEPTH)/base/base.gyp:test_support_base',
+ '<(DEPTH)/crypto/crypto.gyp:crypto',
'<(DEPTH)/net/net.gyp:net',
'<(DEPTH)/testing/gmock.gyp:gmock',
'<(DEPTH)/testing/gtest.gyp:gtest',
@@ -66,24 +66,92 @@
'framer/cast_message_builder_unittest.cc',
'framer/frame_buffer_unittest.cc',
'framer/framer_unittest.cc',
- 'pacing/paced_sender_unittest.cc',
+ 'net/pacing/mock_paced_packet_sender.cc',
+ 'net/pacing/mock_paced_packet_sender.h',
+ 'net/pacing/paced_sender_unittest.cc',
+ 'rtcp/mock_rtcp_receiver_feedback.cc',
+ 'rtcp/mock_rtcp_receiver_feedback.h',
+ 'rtcp/mock_rtcp_sender_feedback.cc',
+ 'rtcp/mock_rtcp_sender_feedback.h',
'rtcp/rtcp_receiver_unittest.cc',
'rtcp/rtcp_sender_unittest.cc',
'rtcp/rtcp_unittest.cc',
+ 'rtp_receiver/rtp_receiver_defines.h',
+ 'rtp_receiver/mock_rtp_payload_feedback.cc',
+ 'rtp_receiver/mock_rtp_payload_feedback.h',
'rtp_receiver/receiver_stats_unittest.cc',
'rtp_receiver/rtp_parser/test/rtp_packet_builder.cc',
'rtp_receiver/rtp_parser/rtp_parser_unittest.cc',
- 'rtp_sender/packet_storage/packet_storage_unittest.cc',
- 'rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc',
- 'rtp_sender/rtp_packetizer/test/rtp_header_parser.cc',
- 'rtp_sender/rtp_packetizer/test/rtp_header_parser.h',
- 'test/fake_task_runner.cc',
+ 'net/rtp_sender/packet_storage/packet_storage_unittest.cc',
+ 'net/rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc',
+ 'net/rtp_sender/rtp_packetizer/test/rtp_header_parser.cc',
+ 'net/rtp_sender/rtp_packetizer/test/rtp_header_parser.h',
+ 'test/crypto_utility.cc',
+ 'test/crypto_utility.h',
+ 'test/encode_decode_test.cc',
+ 'test/end2end_unittest.cc',
'video_receiver/video_decoder_unittest.cc',
'video_receiver/video_receiver_unittest.cc',
+ 'video_sender/mock_video_encoder_controller.cc',
+ 'video_sender/mock_video_encoder_controller.h',
'video_sender/video_encoder_unittest.cc',
'video_sender/video_sender_unittest.cc',
], # source
},
+ {
+ 'target_name': 'cast_sender_app',
+ 'type': 'executable',
+ 'include_dirs': [
+ '<(DEPTH)/',
+ ],
+ 'dependencies': [
+ 'cast_config',
+ '<(DEPTH)/ui/gfx/gfx.gyp:gfx',
+ '<(DEPTH)/net/net.gyp:net_test_support',
+ '<(DEPTH)/media/cast/cast_sender.gyp:*',
+ '<(DEPTH)/media/media.gyp:media',
+ '<(DEPTH)/testing/gtest.gyp:gtest',
+ '<(DEPTH)/third_party/opus/opus.gyp:opus',
+ '<(DEPTH)/media/cast/test/transport/transport.gyp:cast_transport',
+ '<(DEPTH)/media/cast/test/utility/utility.gyp:cast_test_utility',
+ ],
+ 'sources': [
+ '<(DEPTH)/media/cast/test/sender.cc',
+ ],
+ },
+ {
+ 'target_name': 'cast_receiver_app',
+ 'type': 'executable',
+ 'include_dirs': [
+ '<(DEPTH)/',
+ ],
+ 'dependencies': [
+ 'cast_config',
+ '<(DEPTH)/ui/gfx/gfx.gyp:gfx',
+ '<(DEPTH)/net/net.gyp:net_test_support',
+ '<(DEPTH)/media/cast/cast_receiver.gyp:*',
+ '<(DEPTH)/media/media.gyp:media',
+ '<(DEPTH)/testing/gtest.gyp:gtest',
+ '<(DEPTH)/media/cast/test/transport/transport.gyp:cast_transport',
+ '<(DEPTH)/media/cast/test/utility/utility.gyp:cast_test_utility',
+ '<(DEPTH)/third_party/libyuv/libyuv.gyp:libyuv',
+ ],
+ 'sources': [
+ '<(DEPTH)/media/cast/test/receiver.cc',
+ ],
+ 'conditions': [
+ ['OS == "linux"', {
+ 'sources': [
+ '<(DEPTH)/media/cast/test/linux_output_window.cc',
+ '<(DEPTH)/media/cast/test/linux_output_window.h',
+ ],
+ 'libraries': [
+ '-lXext',
+ '-lX11',
+ ],
+ }],
+ ],
+ },
], # targets
}], # include_tests
],
diff --git a/chromium/media/cast/cast_config.cc b/chromium/media/cast/cast_config.cc
index 97c707353a8..6c324bd8759 100644
--- a/chromium/media/cast/cast_config.cc
+++ b/chromium/media/cast/cast_config.cc
@@ -45,5 +45,10 @@ EncodedAudioFrame::~EncodedAudioFrame() {}
PcmAudioFrame::PcmAudioFrame() {}
PcmAudioFrame::~PcmAudioFrame() {}
+// static
+void PacketReceiver::DeletePacket(const uint8* packet) {
+ delete [] packet;
+}
+
} // namespace cast
} // namespace media
diff --git a/chromium/media/cast/cast_config.h b/chromium/media/cast/cast_config.h
index 988924aab45..27cc67e5dae 100644
--- a/chromium/media/cast/cast_config.h
+++ b/chromium/media/cast/cast_config.h
@@ -5,6 +5,7 @@
#ifndef MEDIA_CAST_CAST_CONFIG_H_
#define MEDIA_CAST_CAST_CONFIG_H_
+#include <list>
#include <string>
#include <vector>
@@ -50,8 +51,11 @@ struct AudioSenderConfig {
bool use_external_encoder;
int frequency;
int channels;
- int bitrate;
+ int bitrate; // Set to <= 0 for "auto variable bitrate" (libopus knows best).
AudioCodec codec;
+
+ std::string aes_key; // Binary string of size kAesKeySize.
+ std::string aes_iv_mask; // Binary string of size kAesKeySize.
};
struct VideoSenderConfig {
@@ -82,6 +86,9 @@ struct VideoSenderConfig {
int max_number_of_video_buffers_used; // Max value depend on codec.
VideoCodec codec;
int number_of_cores;
+
+ std::string aes_key; // Binary string of size kAesKeySize.
+ std::string aes_iv_mask; // Binary string of size kAesKeySize.
};
struct AudioReceiverConfig {
@@ -102,6 +109,9 @@ struct AudioReceiverConfig {
int frequency;
int channels;
AudioCodec codec;
+
+ std::string aes_key; // Binary string of size kAesKeySize.
+ std::string aes_iv_mask; // Binary string of size kAesKeySize.
};
struct VideoReceiverConfig {
@@ -125,20 +135,9 @@ struct VideoReceiverConfig {
// from catching up after a glitch.
bool decoder_faster_than_max_frame_rate;
VideoCodec codec;
-};
-
-struct I420VideoPlane {
- int stride;
- int length;
- uint8* data;
-};
-struct I420VideoFrame {
- int width;
- int height;
- I420VideoPlane y_plane;
- I420VideoPlane u_plane;
- I420VideoPlane v_plane;
+ std::string aes_key; // Binary string of size kAesKeySize.
+ std::string aes_iv_mask; // Binary string of size kAesKeySize.
};
struct EncodedVideoFrame {
@@ -147,11 +146,13 @@ struct EncodedVideoFrame {
VideoCodec codec;
bool key_frame;
- uint8 frame_id;
- uint8 last_referenced_frame_id;
- std::vector<uint8> data;
+ uint32 frame_id;
+ uint32 last_referenced_frame_id;
+ std::string data;
};
+// DEPRECATED: Do not use in new code. Please migrate existing code to use
+// media::AudioBus.
struct PcmAudioFrame {
PcmAudioFrame();
~PcmAudioFrame();
@@ -166,16 +167,24 @@ struct EncodedAudioFrame {
~EncodedAudioFrame();
AudioCodec codec;
- uint8 frame_id; // Needed to release the frame. Not used send side.
+ uint32 frame_id; // Needed to release the frame.
int samples; // Needed send side to advance the RTP timestamp.
// Not used receive side.
- std::vector<uint8> data;
+ // Support for max sampling rate of 48KHz, 2 channels, 100 ms duration.
+ static const int kMaxNumberOfSamples = 48 * 2 * 100;
+ std::string data;
};
+typedef std::vector<uint8> Packet;
+typedef std::vector<Packet> PacketList;
+
class PacketSender {
public:
- // All packets to be sent to the network will be delivered via this function.
- virtual bool SendPacket(const uint8* packet, int length) = 0;
+ // All packets to be sent to the network will be delivered via these
+ // functions.
+ virtual bool SendPackets(const PacketList& packets) = 0;
+
+ virtual bool SendPacket(const Packet& packet) = 0;
virtual ~PacketSender() {}
};
@@ -184,10 +193,16 @@ class PacketReceiver : public base::RefCountedThreadSafe<PacketReceiver> {
public:
// All packets received from the network should be delivered via this
// function.
- virtual void ReceivedPacket(const uint8* packet, int length,
+ virtual void ReceivedPacket(const uint8* packet, size_t length,
const base::Closure callback) = 0;
+ static void DeletePacket(const uint8* packet);
+
+ protected:
virtual ~PacketReceiver() {}
+
+ private:
+ friend class base::RefCountedThreadSafe<PacketReceiver>;
};
class VideoEncoderController {
@@ -203,7 +218,7 @@ class VideoEncoderController {
virtual void GenerateKeyFrame() = 0;
// Inform the encoder to only reference frames older or equal to frame_id;
- virtual void LatestFrameIdToReference(uint8 frame_id) = 0;
+ virtual void LatestFrameIdToReference(uint32 frame_id) = 0;
// Query the codec about how many frames it has skipped due to slow ACK.
virtual int NumberOfSkippedFrames() const = 0;
diff --git a/chromium/media/cast/cast_defines.h b/chromium/media/cast/cast_defines.h
index 13717323405..aad7ae2b1d8 100644
--- a/chromium/media/cast/cast_defines.h
+++ b/chromium/media/cast/cast_defines.h
@@ -10,6 +10,7 @@
#include "base/basictypes.h"
#include "base/compiler_specific.h"
+#include "base/logging.h"
#include "base/time/time.h"
namespace media {
@@ -17,9 +18,9 @@ namespace cast {
const int64 kDontShowTimeoutMs = 33;
const float kDefaultCongestionControlBackOff = 0.875f;
-const uint8 kStartFrameId = 255;
const uint32 kVideoFrequency = 90000;
const int64 kSkippedFramesCheckPeriodkMs = 10000;
+const uint32 kStartFrameId = GG_UINT32_C(0xffffffff);
// Number of skipped frames threshold in fps (as configured) per period above.
const int kSkippedFramesThreshold = 3;
@@ -29,6 +30,8 @@ const int64 kCastMessageUpdateIntervalMs = 33;
const int64 kNackRepeatIntervalMs = 30;
enum DefaultSettings {
+ kDefaultAudioEncoderBitrate = 0, // This means "auto," and may mean VBR.
+ kDefaultAudioSamplingRate = 48000,
kDefaultMaxQp = 56,
kDefaultMinQp = 4,
kDefaultMaxFrameRate = 30,
@@ -40,6 +43,14 @@ enum DefaultSettings {
const uint16 kRtcpCastAllPacketsLost = 0xffff;
+const size_t kMinLengthOfRtcp = 8;
+
+// Basic RTP header + cast header.
+const size_t kMinLengthOfRtp = 12 + 6;
+
+const size_t kAesBlockSize = 16;
+const size_t kAesKeySize = 16;
+
// Each uint16 represents one packet id within a cast frame.
typedef std::set<uint16> PacketIdSet;
// Each uint8 represents one cast frame.
@@ -48,22 +59,26 @@ typedef std::map<uint8, PacketIdSet> MissingFramesAndPacketsMap;
// TODO(pwestin): Re-factor the functions bellow into a class with static
// methods.
+// January 1970, in NTP seconds.
+// Network Time Protocol (NTP), which is in seconds relative to 0h UTC on
+// 1 January 1900.
+static const int64 kUnixEpochInNtpSeconds = GG_INT64_C(2208988800);
+
// Magic fractional unit. Used to convert time (in microseconds) to/from
// fractional NTP seconds.
static const double kMagicFractionalUnit = 4.294967296E3;
-// Network Time Protocol (NTP), which is in seconds relative to 0h UTC on
-// 1 January 1900.
-static const int64 kNtpEpochDeltaSeconds = GG_INT64_C(9435484800);
-static const int64 kNtpEpochDeltaMicroseconds =
- kNtpEpochDeltaSeconds * base::Time::kMicrosecondsPerSecond;
-
-inline bool IsNewerFrameId(uint8 frame_id, uint8 prev_frame_id) {
+inline bool IsNewerFrameId(uint32 frame_id, uint32 prev_frame_id) {
return (frame_id != prev_frame_id) &&
- static_cast<uint8>(frame_id - prev_frame_id) < 0x80;
+ static_cast<uint32>(frame_id - prev_frame_id) < 0x80000000;
+}
+
+inline bool IsNewerRtpTimestamp(uint32 timestamp, uint32 prev_timestamp) {
+ return (timestamp != prev_timestamp) &&
+ static_cast<uint32>(timestamp - prev_timestamp) < 0x80000000;
}
-inline bool IsOlderFrameId(uint8 frame_id, uint8 prev_frame_id) {
+inline bool IsOlderFrameId(uint32 frame_id, uint32 prev_frame_id) {
return (frame_id == prev_frame_id) || IsNewerFrameId(prev_frame_id, frame_id);
}
@@ -95,25 +110,57 @@ inline base::TimeDelta ConvertFromNtpDiff(uint32 ntp_delay) {
inline void ConvertTimeToFractions(int64 time_us,
uint32* seconds,
uint32* fractions) {
+ DCHECK_GE(time_us, 0) << "Time must NOT be negative";
*seconds = static_cast<uint32>(time_us / base::Time::kMicrosecondsPerSecond);
*fractions = static_cast<uint32>(
(time_us % base::Time::kMicrosecondsPerSecond) * kMagicFractionalUnit);
}
-inline void ConvertTimeToNtp(const base::TimeTicks& time,
- uint32* ntp_seconds,
- uint32* ntp_fractions) {
- int64 time_us = time.ToInternalValue() - kNtpEpochDeltaMicroseconds;
- ConvertTimeToFractions(time_us, ntp_seconds, ntp_fractions);
+inline void ConvertTimeTicksToNtp(const base::TimeTicks& time,
+ uint32* ntp_seconds,
+ uint32* ntp_fractions) {
+ base::TimeDelta elapsed_since_unix_epoch =
+ time - base::TimeTicks::UnixEpoch();
+
+ int64 ntp_time_us = elapsed_since_unix_epoch.InMicroseconds() +
+ (kUnixEpochInNtpSeconds * base::Time::kMicrosecondsPerSecond);
+
+ ConvertTimeToFractions(ntp_time_us, ntp_seconds, ntp_fractions);
}
-inline base::TimeTicks ConvertNtpToTime(uint32 ntp_seconds,
- uint32 ntp_fractions) {
+inline base::TimeTicks ConvertNtpToTimeTicks(uint32 ntp_seconds,
+ uint32 ntp_fractions) {
int64 ntp_time_us = static_cast<int64>(ntp_seconds) *
- base::Time::kMicrosecondsPerSecond;
- ntp_time_us += static_cast<int64>(ntp_fractions) / kMagicFractionalUnit;
- return base::TimeTicks::FromInternalValue(ntp_time_us +
- kNtpEpochDeltaMicroseconds);
+ base::Time::kMicrosecondsPerSecond +
+ static_cast<int64>(ntp_fractions) / kMagicFractionalUnit;
+
+ base::TimeDelta elapsed_since_unix_epoch =
+ base::TimeDelta::FromMicroseconds(ntp_time_us -
+ (kUnixEpochInNtpSeconds * base::Time::kMicrosecondsPerSecond));
+ return base::TimeTicks::UnixEpoch() + elapsed_since_unix_epoch;
+}
+
+inline std::string GetAesNonce(uint32 frame_id, const std::string& iv_mask) {
+ std::string aes_nonce(kAesBlockSize, 0);
+
+ // Serializing frame_id in big-endian order (aes_nonce[8] is the most
+ // significant byte of frame_id).
+ aes_nonce[11] = frame_id & 0xff;
+ aes_nonce[10] = (frame_id >> 8) & 0xff;
+ aes_nonce[9] = (frame_id >> 16) & 0xff;
+ aes_nonce[8] = (frame_id >> 24) & 0xff;
+
+ for (size_t i = 0; i < kAesBlockSize; ++i) {
+ aes_nonce[i] ^= iv_mask[i];
+ }
+ return aes_nonce;
+}
+
+inline uint32 GetVideoRtpTimestamp(const base::TimeTicks& time_ticks) {
+ base::TimeTicks zero_time;
+ base::TimeDelta recorded_delta = time_ticks - zero_time;
+ // Timestamp is in 90 KHz for video.
+ return static_cast<uint32>(recorded_delta.InMilliseconds() * 90);
}
} // namespace cast
diff --git a/chromium/media/cast/cast_environment.cc b/chromium/media/cast/cast_environment.cc
new file mode 100644
index 00000000000..be636bb253d
--- /dev/null
+++ b/chromium/media/cast/cast_environment.cc
@@ -0,0 +1,101 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/cast_environment.h"
+
+#include "base/logging.h"
+
+using base::TaskRunner;
+
+namespace media {
+namespace cast {
+
+CastEnvironment::CastEnvironment(
+ base::TickClock* clock,
+ scoped_refptr<TaskRunner> main_thread_proxy,
+ scoped_refptr<TaskRunner> audio_encode_thread_proxy,
+ scoped_refptr<TaskRunner> audio_decode_thread_proxy,
+ scoped_refptr<TaskRunner> video_encode_thread_proxy,
+ scoped_refptr<TaskRunner> video_decode_thread_proxy,
+ const CastLoggingConfig& config)
+ : clock_(clock),
+ main_thread_proxy_(main_thread_proxy),
+ audio_encode_thread_proxy_(audio_encode_thread_proxy),
+ audio_decode_thread_proxy_(audio_decode_thread_proxy),
+ video_encode_thread_proxy_(video_encode_thread_proxy),
+ video_decode_thread_proxy_(video_decode_thread_proxy),
+ logging_(new LoggingImpl(clock, main_thread_proxy, config)) {
+ DCHECK(main_thread_proxy) << "Main thread required";
+}
+
+CastEnvironment::~CastEnvironment() {}
+
+bool CastEnvironment::PostTask(ThreadId identifier,
+ const tracked_objects::Location& from_here,
+ const base::Closure& task) {
+ scoped_refptr<TaskRunner> task_runner =
+ GetMessageTaskRunnerForThread(identifier);
+
+ return task_runner->PostTask(from_here, task);
+}
+
+bool CastEnvironment::PostDelayedTask(ThreadId identifier,
+ const tracked_objects::Location& from_here,
+ const base::Closure& task,
+ base::TimeDelta delay) {
+ scoped_refptr<TaskRunner> task_runner =
+ GetMessageTaskRunnerForThread(identifier);
+
+ return task_runner->PostDelayedTask(from_here, task, delay);
+}
+
+scoped_refptr<TaskRunner> CastEnvironment::GetMessageTaskRunnerForThread(
+ ThreadId identifier) {
+ switch (identifier) {
+ case CastEnvironment::MAIN:
+ return main_thread_proxy_;
+ case CastEnvironment::AUDIO_ENCODER:
+ return audio_encode_thread_proxy_;
+ case CastEnvironment::AUDIO_DECODER:
+ return audio_decode_thread_proxy_;
+ case CastEnvironment::VIDEO_ENCODER:
+ return video_encode_thread_proxy_;
+ case CastEnvironment::VIDEO_DECODER:
+ return video_decode_thread_proxy_;
+ default:
+ NOTREACHED() << "Invalid Thread identifier";
+ return NULL;
+ }
+}
+
+bool CastEnvironment::CurrentlyOn(ThreadId identifier) {
+ switch (identifier) {
+ case CastEnvironment::MAIN:
+ return main_thread_proxy_->RunsTasksOnCurrentThread();
+ case CastEnvironment::AUDIO_ENCODER:
+ return audio_encode_thread_proxy_->RunsTasksOnCurrentThread();
+ case CastEnvironment::AUDIO_DECODER:
+ return audio_decode_thread_proxy_->RunsTasksOnCurrentThread();
+ case CastEnvironment::VIDEO_ENCODER:
+ return video_encode_thread_proxy_->RunsTasksOnCurrentThread();
+ case CastEnvironment::VIDEO_DECODER:
+ return video_decode_thread_proxy_->RunsTasksOnCurrentThread();
+ default:
+ NOTREACHED() << "Invalid thread identifier";
+ return false;
+ }
+}
+
+base::TickClock* CastEnvironment::Clock() const {
+ return clock_;
+}
+
+LoggingImpl* CastEnvironment::Logging() {
+ DCHECK(CurrentlyOn(CastEnvironment::MAIN)) <<
+ "Must be called from main thread";
+ return logging_.get();
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/cast_thread.h b/chromium/media/cast/cast_environment.h
index b004157042e..8a135733c04 100644
--- a/chromium/media/cast/cast_thread.h
+++ b/chromium/media/cast/cast_environment.h
@@ -2,18 +2,22 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_CAST_CAST_THREAD_H_
-#define MEDIA_CAST_CAST_THREAD_H_
+#ifndef MEDIA_CAST_CAST_ENVIRONMENT_H_
+#define MEDIA_CAST_CAST_ENVIRONMENT_H_
#include "base/basictypes.h"
#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
#include "base/task_runner.h"
+#include "base/time/tick_clock.h"
#include "base/time/time.h"
+#include "media/cast/logging/logging_defines.h"
+#include "media/cast/logging/logging_impl.h"
namespace media {
namespace cast {
-class CastThread : public base::RefCountedThreadSafe<CastThread> {
+class CastEnvironment : public base::RefCountedThreadSafe<CastEnvironment> {
public:
// An enumeration of the cast threads.
enum ThreadId {
@@ -32,11 +36,13 @@ class CastThread : public base::RefCountedThreadSafe<CastThread> {
VIDEO_DECODER,
};
- CastThread(scoped_refptr<base::TaskRunner> main_thread_proxy,
- scoped_refptr<base::TaskRunner> audio_encode_thread_proxy,
- scoped_refptr<base::TaskRunner> audio_decode_thread_proxy,
- scoped_refptr<base::TaskRunner> video_encode_thread_proxy,
- scoped_refptr<base::TaskRunner> video_decode_thread_proxy);
+ CastEnvironment(base::TickClock* clock,
+ scoped_refptr<base::TaskRunner> main_thread_proxy,
+ scoped_refptr<base::TaskRunner> audio_encode_thread_proxy,
+ scoped_refptr<base::TaskRunner> audio_decode_thread_proxy,
+ scoped_refptr<base::TaskRunner> video_encode_thread_proxy,
+ scoped_refptr<base::TaskRunner> video_decode_thread_proxy,
+ const CastLoggingConfig& config);
// These are the same methods in message_loop.h, but are guaranteed to either
// get posted to the MessageLoop if it's still alive, or be deleted otherwise.
@@ -52,20 +58,35 @@ class CastThread : public base::RefCountedThreadSafe<CastThread> {
const base::Closure& task,
base::TimeDelta delay);
+ bool CurrentlyOn(ThreadId identifier);
+
+ base::TickClock* Clock() const;
+
+ // Logging is not thread safe. Should always be called from the main thread.
+ LoggingImpl* Logging();
+
+ protected:
+ virtual ~CastEnvironment();
+
private:
+ friend class base::RefCountedThreadSafe<CastEnvironment>;
+
scoped_refptr<base::TaskRunner> GetMessageTaskRunnerForThread(
ThreadId identifier);
+ base::TickClock* const clock_; // Not owned by this class.
scoped_refptr<base::TaskRunner> main_thread_proxy_;
scoped_refptr<base::TaskRunner> audio_encode_thread_proxy_;
scoped_refptr<base::TaskRunner> audio_decode_thread_proxy_;
scoped_refptr<base::TaskRunner> video_encode_thread_proxy_;
scoped_refptr<base::TaskRunner> video_decode_thread_proxy_;
- DISALLOW_COPY_AND_ASSIGN(CastThread);
+ scoped_ptr<LoggingImpl> logging_;
+
+ DISALLOW_COPY_AND_ASSIGN(CastEnvironment);
};
} // namespace cast
} // namespace media
-#endif // MEDIA_CAST_CAST_THREAD_H_
+#endif // MEDIA_CAST_CAST_ENVIRONMENT_H_
diff --git a/chromium/media/cast/cast_receiver.gyp b/chromium/media/cast/cast_receiver.gyp
index 539c41d89a9..031aec7e16a 100644
--- a/chromium/media/cast/cast_receiver.gyp
+++ b/chromium/media/cast/cast_receiver.gyp
@@ -9,19 +9,24 @@
],
'targets': [
{
- 'target_name': 'cast_receiver_impl',
+ 'target_name': 'cast_receiver',
'type': 'static_library',
+ 'include_dirs': [
+ '<(DEPTH)/',
+ '<(DEPTH)/third_party/',
+ '<(DEPTH)/third_party/webrtc/',
+ ],
'sources': [
'cast_receiver.h',
-# 'cast_receiver_impl.cc',
-# 'cast_receiver_impl.h',
+ 'cast_receiver_impl.cc',
+ 'cast_receiver_impl.h',
], # source
'dependencies': [
- 'rtp_receiver/rtp_receiver.gyp:*',
+ '<(DEPTH)/crypto/crypto.gyp:crypto',
'cast_audio_receiver',
'cast_video_receiver',
- 'framer/framer.gyp:cast_framer',
- 'pacing/paced_sender.gyp:paced_sender',
+ 'net/pacing/paced_sender.gyp:cast_paced_sender',
+ 'rtp_receiver/rtp_receiver.gyp:cast_rtp_receiver',
],
},
],
diff --git a/chromium/media/cast/cast_receiver.h b/chromium/media/cast/cast_receiver.h
index a2eef765607..75e6f68d3bb 100644
--- a/chromium/media/cast/cast_receiver.h
+++ b/chromium/media/cast/cast_receiver.h
@@ -14,47 +14,60 @@
#include "base/memory/scoped_ptr.h"
#include "base/time/time.h"
#include "media/cast/cast_config.h"
-#include "media/cast/cast_thread.h"
+#include "media/cast/cast_environment.h"
+
+namespace media {
+class VideoFrame;
+}
namespace media {
namespace cast {
-// Callback in which the raw audio frame and render time will be returned
+// Callback in which the raw audio frame and play-out time will be returned
// once decoding is complete.
-typedef base::Callback<void(scoped_ptr<PcmAudioFrame>,
- const base::TimeTicks)> AudioFrameDecodedCallback;
+typedef base::Callback<void(scoped_ptr<PcmAudioFrame>, const base::TimeTicks&)>
+ AudioFrameDecodedCallback;
+
+// Callback in which the encoded audio frame and play-out time will be returned.
+typedef base::Callback<void(scoped_ptr<EncodedAudioFrame>,
+ const base::TimeTicks&)> AudioFrameEncodedCallback;
// Callback in which the raw frame and render time will be returned once
// decoding is complete.
-typedef base::Callback<void(scoped_ptr<I420VideoFrame>,
- const base::TimeTicks)> VideoFrameDecodedCallback;
+typedef base::Callback<void(const scoped_refptr<media::VideoFrame>& video_frame,
+ const base::TimeTicks&)>
+ VideoFrameDecodedCallback;
+
+// Callback in which the encoded video frame and render time will be returned.
+typedef base::Callback<void(scoped_ptr<EncodedVideoFrame>,
+ const base::TimeTicks&)> VideoFrameEncodedCallback;
// This Class is thread safe.
-class FrameReceiver : public base::RefCountedThreadSafe<FrameReceiver>{
+class FrameReceiver : public base::RefCountedThreadSafe<FrameReceiver> {
public:
- virtual bool GetRawVideoFrame(const VideoFrameDecodedCallback& callback) = 0;
-
- virtual bool GetEncodedVideoFrame(EncodedVideoFrame* video_frame,
- base::TimeTicks* render_time) = 0;
-
- virtual void ReleaseEncodedVideoFrame(uint8 frame_id) = 0;
-
- virtual bool GetRawAudioFrame(int number_of_10ms_blocks,
+ virtual void GetRawAudioFrame(int number_of_10ms_blocks,
int desired_frequency,
- const AudioFrameDecodedCallback callback) = 0;
+ const AudioFrameDecodedCallback& callback) = 0;
- virtual bool GetCodedAudioFrame(EncodedAudioFrame* audio_frame,
- base::TimeTicks* playout_time) = 0;
+ virtual void GetCodedAudioFrame(
+ const AudioFrameEncodedCallback& callback) = 0;
- virtual void ReleaseCodedAudioFrame(uint8 frame_id) = 0;
+ virtual void GetRawVideoFrame(const VideoFrameDecodedCallback& callback) = 0;
+ virtual void GetEncodedVideoFrame(
+ const VideoFrameEncodedCallback& callback) = 0;
+
+ protected:
virtual ~FrameReceiver() {}
+
+ private:
+ friend class base::RefCountedThreadSafe<FrameReceiver>;
};
// This Class is thread safe.
class CastReceiver {
public:
static CastReceiver* CreateCastReceiver(
- scoped_refptr<CastThread> cast_thread,
+ scoped_refptr<CastEnvironment> cast_environment,
const AudioReceiverConfig& audio_config,
const VideoReceiverConfig& video_config,
PacketSender* const packet_sender);
@@ -66,7 +79,7 @@ class CastReceiver {
// Polling interface to get audio and video frames from the CastReceiver.
virtual scoped_refptr<FrameReceiver> frame_receiver() = 0;
- virtual ~CastReceiver() {};
+ virtual ~CastReceiver() {}
};
} // namespace cast
diff --git a/chromium/media/cast/cast_receiver_impl.cc b/chromium/media/cast/cast_receiver_impl.cc
new file mode 100644
index 00000000000..e2c004fe963
--- /dev/null
+++ b/chromium/media/cast/cast_receiver_impl.cc
@@ -0,0 +1,175 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/cast_receiver_impl.h"
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/logging.h"
+#include "base/message_loop/message_loop.h"
+
+namespace media {
+namespace cast {
+
+// The video and audio receivers should only be called from the main thread.
+// LocalFrameReciever posts tasks to the main thread, making the cast interface
+// thread safe.
+class LocalFrameReceiver : public FrameReceiver {
+ public:
+ LocalFrameReceiver(scoped_refptr<CastEnvironment> cast_environment,
+ AudioReceiver* audio_receiver,
+ VideoReceiver* video_receiver)
+ : cast_environment_(cast_environment),
+ audio_receiver_(audio_receiver),
+ video_receiver_(video_receiver) {}
+
+ virtual void GetRawVideoFrame(
+ const VideoFrameDecodedCallback& callback) OVERRIDE {
+ cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
+ base::Bind(&VideoReceiver::GetRawVideoFrame,
+ video_receiver_->AsWeakPtr(), callback));
+ }
+
+ virtual void GetEncodedVideoFrame(
+ const VideoFrameEncodedCallback& callback) OVERRIDE {
+ cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
+ base::Bind(&VideoReceiver::GetEncodedVideoFrame,
+ video_receiver_->AsWeakPtr(), callback));
+ }
+
+ virtual void GetRawAudioFrame(
+ int number_of_10ms_blocks,
+ int desired_frequency,
+ const AudioFrameDecodedCallback& callback) OVERRIDE {
+ cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE, base::Bind(
+ &AudioReceiver::GetRawAudioFrame, audio_receiver_->AsWeakPtr(),
+ number_of_10ms_blocks, desired_frequency, callback));
+ }
+
+ virtual void GetCodedAudioFrame(
+ const AudioFrameEncodedCallback& callback) OVERRIDE {
+ cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
+ base::Bind(&AudioReceiver::GetEncodedAudioFrame,
+ audio_receiver_->AsWeakPtr(), callback));
+ }
+
+ protected:
+ virtual ~LocalFrameReceiver() {}
+
+ private:
+ friend class base::RefCountedThreadSafe<LocalFrameReceiver>;
+
+ scoped_refptr<CastEnvironment> cast_environment_;
+ AudioReceiver* audio_receiver_;
+ VideoReceiver* video_receiver_;
+};
+
+// The video and audio receivers should only be called from the main thread.
+class LocalPacketReceiver : public PacketReceiver {
+ public:
+ LocalPacketReceiver(scoped_refptr<CastEnvironment> cast_environment,
+ AudioReceiver* audio_receiver,
+ VideoReceiver* video_receiver,
+ uint32 ssrc_of_audio_sender,
+ uint32 ssrc_of_video_sender)
+ : cast_environment_(cast_environment),
+ audio_receiver_(audio_receiver),
+ video_receiver_(video_receiver),
+ ssrc_of_audio_sender_(ssrc_of_audio_sender),
+ ssrc_of_video_sender_(ssrc_of_video_sender) {}
+
+ virtual void ReceivedPacket(const uint8* packet,
+ size_t length,
+ const base::Closure callback) OVERRIDE {
+ if (length < kMinLengthOfRtcp) {
+ // No action; just log and call the callback informing that we are done
+ // with the packet.
+ VLOG(1) << "Received a packet which is too short " << length;
+ cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE, callback);
+ return;
+ }
+ uint32 ssrc_of_sender;
+ if (!Rtcp::IsRtcpPacket(packet, length)) {
+ if (length < kMinLengthOfRtp) {
+ // No action; just log and call the callback informing that we are done
+ // with the packet.
+ VLOG(1) << "Received a RTP packet which is too short " << length;
+ cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE, callback);
+ return;
+ }
+ ssrc_of_sender = RtpReceiver::GetSsrcOfSender(packet, length);
+ } else {
+ ssrc_of_sender = Rtcp::GetSsrcOfSender(packet, length);
+ }
+ if (ssrc_of_sender == ssrc_of_audio_sender_) {
+ cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
+ base::Bind(&AudioReceiver::IncomingPacket,
+ audio_receiver_->AsWeakPtr(), packet, length, callback));
+ } else if (ssrc_of_sender == ssrc_of_video_sender_) {
+ cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
+ base::Bind(&VideoReceiver::IncomingPacket,
+ video_receiver_->AsWeakPtr(), packet, length, callback));
+ } else {
+ // No action; just log and call the callback informing that we are done
+ // with the packet.
+ VLOG(1) << "Received a packet with a non matching sender SSRC "
+ << ssrc_of_sender;
+
+ cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE, callback);
+ }
+ }
+
+ protected:
+ virtual ~LocalPacketReceiver() {}
+
+ private:
+ friend class base::RefCountedThreadSafe<LocalPacketReceiver>;
+
+ scoped_refptr<CastEnvironment> cast_environment_;
+ AudioReceiver* audio_receiver_;
+ VideoReceiver* video_receiver_;
+ const uint32 ssrc_of_audio_sender_;
+ const uint32 ssrc_of_video_sender_;
+};
+
+CastReceiver* CastReceiver::CreateCastReceiver(
+ scoped_refptr<CastEnvironment> cast_environment,
+ const AudioReceiverConfig& audio_config,
+ const VideoReceiverConfig& video_config,
+ PacketSender* const packet_sender) {
+ return new CastReceiverImpl(cast_environment,
+ audio_config,
+ video_config,
+ packet_sender);
+}
+
+CastReceiverImpl::CastReceiverImpl(
+ scoped_refptr<CastEnvironment> cast_environment,
+ const AudioReceiverConfig& audio_config,
+ const VideoReceiverConfig& video_config,
+ PacketSender* const packet_sender)
+ : pacer_(cast_environment, packet_sender),
+ audio_receiver_(cast_environment, audio_config, &pacer_),
+ video_receiver_(cast_environment, video_config, &pacer_),
+ frame_receiver_(new LocalFrameReceiver(cast_environment,
+ &audio_receiver_,
+ &video_receiver_)),
+ packet_receiver_(new LocalPacketReceiver(cast_environment,
+ &audio_receiver_,
+ &video_receiver_,
+ audio_config.incoming_ssrc,
+ video_config.incoming_ssrc)) {}
+
+CastReceiverImpl::~CastReceiverImpl() {}
+
+scoped_refptr<PacketReceiver> CastReceiverImpl::packet_receiver() {
+ return packet_receiver_;
+}
+
+scoped_refptr<FrameReceiver> CastReceiverImpl::frame_receiver() {
+ return frame_receiver_;
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/cast_receiver_impl.h b/chromium/media/cast/cast_receiver_impl.h
new file mode 100644
index 00000000000..d34a3de6514
--- /dev/null
+++ b/chromium/media/cast/cast_receiver_impl.h
@@ -0,0 +1,50 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_CAST_RECEIVER_IMPL_H_
+#define MEDIA_CAST_CAST_RECEIVER_IMPL_H_
+
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "media/cast/audio_receiver/audio_receiver.h"
+#include "media/cast/cast_config.h"
+#include "media/cast/cast_environment.h"
+#include "media/cast/cast_receiver.h"
+#include "media/cast/net/pacing/paced_sender.h"
+#include "media/cast/video_receiver/video_receiver.h"
+
+namespace media {
+namespace cast {
+
+// This calls is a pure owner class that group all required receive objects
+// together such as pacer, packet receiver, frame receiver, audio and video
+// receivers.
+class CastReceiverImpl : public CastReceiver {
+ public:
+ CastReceiverImpl(scoped_refptr<CastEnvironment> cast_environment,
+ const AudioReceiverConfig& audio_config,
+ const VideoReceiverConfig& video_config,
+ PacketSender* const packet_sender);
+
+ virtual ~CastReceiverImpl();
+
+ // All received RTP and RTCP packets for the call should be inserted to this
+ // PacketReceiver.
+ virtual scoped_refptr<PacketReceiver> packet_receiver() OVERRIDE;
+
+ // Interface to get audio and video frames from the CastReceiver.
+ virtual scoped_refptr<FrameReceiver> frame_receiver() OVERRIDE;
+
+ private:
+ PacedSender pacer_;
+ AudioReceiver audio_receiver_;
+ VideoReceiver video_receiver_;
+ scoped_refptr<FrameReceiver> frame_receiver_;
+ scoped_refptr<PacketReceiver> packet_receiver_;
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_CAST_RECEIVER_IMPL_
diff --git a/chromium/media/cast/cast_sender.gyp b/chromium/media/cast/cast_sender.gyp
index fe99f803820..1f9b07e4a42 100644
--- a/chromium/media/cast/cast_sender.gyp
+++ b/chromium/media/cast/cast_sender.gyp
@@ -10,7 +10,7 @@
],
'targets': [
{
- 'target_name': 'cast_sender_impl',
+ 'target_name': 'cast_sender',
'type': 'static_library',
'include_dirs': [
'<(DEPTH)/',
@@ -23,11 +23,12 @@
'cast_sender_impl.h',
], # source
'dependencies': [
+ '<(DEPTH)/crypto/crypto.gyp:crypto',
'audio_sender',
'congestion_control',
- 'pacing/paced_sender.gyp:paced_sender',
+ 'net/pacing/paced_sender.gyp:cast_paced_sender',
+ 'net/rtp_sender/rtp_sender.gyp:cast_rtp_sender',
'rtcp/rtcp.gyp:cast_rtcp',
- 'rtp_sender/rtp_sender.gyp:cast_rtp_sender',
'video_sender',
], # dependencies
},
diff --git a/chromium/media/cast/cast_sender.h b/chromium/media/cast/cast_sender.h
index f4d36539b44..abe22f56345 100644
--- a/chromium/media/cast/cast_sender.h
+++ b/chromium/media/cast/cast_sender.h
@@ -14,23 +14,29 @@
#include "base/basictypes.h"
#include "base/callback.h"
#include "base/memory/ref_counted.h"
+#include "base/time/tick_clock.h"
#include "base/time/time.h"
#include "media/cast/cast_config.h"
-#include "media/cast/cast_thread.h"
+#include "media/cast/cast_environment.h"
+
+namespace media {
+class AudioBus;
+class VideoFrame;
+}
namespace media {
namespace cast {
// This Class is thread safe.
-class FrameInput : public base::RefCountedThreadSafe<PacketReceiver> {
+class FrameInput : public base::RefCountedThreadSafe<FrameInput> {
public:
// The video_frame must be valid until the callback is called.
// The callback is called from the main cast thread as soon as
// the encoder is done with the frame; it does not mean that the encoded frame
// has been sent out.
- virtual void InsertRawVideoFrame(const I420VideoFrame* video_frame,
- const base::TimeTicks& capture_time,
- const base::Closure callback) = 0;
+ virtual void InsertRawVideoFrame(
+ const scoped_refptr<media::VideoFrame>& video_frame,
+ const base::TimeTicks& capture_time) = 0;
// The video_frame must be valid until the callback is called.
// The callback is called from the main cast thread as soon as
@@ -40,13 +46,13 @@ class FrameInput : public base::RefCountedThreadSafe<PacketReceiver> {
const base::TimeTicks& capture_time,
const base::Closure callback) = 0;
- // The audio_frame must be valid until the callback is called.
- // The callback is called from the main cast thread as soon as
- // the encoder is done with the frame; it does not mean that the encoded frame
- // has been sent out.
- virtual void InsertRawAudioFrame(const PcmAudioFrame* audio_frame,
- const base::TimeTicks& recorded_time,
- const base::Closure callback) = 0;
+ // The |audio_bus| must be valid until the |done_callback| is called.
+ // The callback is called from the main cast thread as soon as the encoder is
+ // done with |audio_bus|; it does not mean that the encoded data has been
+ // sent out.
+ virtual void InsertAudio(const AudioBus* audio_bus,
+ const base::TimeTicks& recorded_time,
+ const base::Closure& done_callback) = 0;
// The audio_frame must be valid until the callback is called.
// The callback is called from the main cast thread as soon as
@@ -56,7 +62,11 @@ class FrameInput : public base::RefCountedThreadSafe<PacketReceiver> {
const base::TimeTicks& recorded_time,
const base::Closure callback) = 0;
+ protected:
virtual ~FrameInput() {}
+
+ private:
+ friend class base::RefCountedThreadSafe<FrameInput>;
};
// This Class is thread safe.
@@ -65,13 +75,13 @@ class FrameInput : public base::RefCountedThreadSafe<PacketReceiver> {
class CastSender {
public:
static CastSender* CreateCastSender(
- scoped_refptr<CastThread> cast_thread,
+ scoped_refptr<CastEnvironment> cast_environment,
const AudioSenderConfig& audio_config,
const VideoSenderConfig& video_config,
VideoEncoderController* const video_encoder_controller,
PacketSender* const packet_sender);
- virtual ~CastSender() {};
+ virtual ~CastSender() {}
// All audio and video frames for the session should be inserted to this
// object.
diff --git a/chromium/media/cast/cast_sender_impl.cc b/chromium/media/cast/cast_sender_impl.cc
index 76f2f997651..69ebd53c6bd 100644
--- a/chromium/media/cast/cast_sender_impl.cc
+++ b/chromium/media/cast/cast_sender_impl.cc
@@ -7,6 +7,7 @@
#include "base/callback.h"
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
+#include "media/base/video_frame.h"
namespace media {
namespace cast {
@@ -16,47 +17,52 @@ namespace cast {
// This make the cast sender interface thread safe.
class LocalFrameInput : public FrameInput {
public:
- LocalFrameInput(scoped_refptr<CastThread> cast_thread,
+ LocalFrameInput(scoped_refptr<CastEnvironment> cast_environment,
base::WeakPtr<AudioSender> audio_sender,
base::WeakPtr<VideoSender> video_sender)
- : cast_thread_(cast_thread),
+ : cast_environment_(cast_environment),
audio_sender_(audio_sender),
video_sender_(video_sender) {}
- virtual void InsertRawVideoFrame(const I420VideoFrame* video_frame,
- const base::TimeTicks& capture_time,
- const base::Closure callback) OVERRIDE {
- cast_thread_->PostTask(CastThread::MAIN, FROM_HERE,
+ virtual void InsertRawVideoFrame(
+ const scoped_refptr<media::VideoFrame>& video_frame,
+ const base::TimeTicks& capture_time) OVERRIDE {
+ cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
base::Bind(&VideoSender::InsertRawVideoFrame, video_sender_,
- video_frame, capture_time, callback));
+ video_frame, capture_time));
}
virtual void InsertCodedVideoFrame(const EncodedVideoFrame* video_frame,
const base::TimeTicks& capture_time,
const base::Closure callback) OVERRIDE {
- cast_thread_->PostTask(CastThread::MAIN, FROM_HERE,
+ cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
base::Bind(&VideoSender::InsertCodedVideoFrame, video_sender_,
video_frame, capture_time, callback));
}
- virtual void InsertRawAudioFrame(const PcmAudioFrame* audio_frame,
- const base::TimeTicks& recorded_time,
- const base::Closure callback) OVERRIDE {
- cast_thread_->PostTask(CastThread::MAIN, FROM_HERE,
- base::Bind(&AudioSender::InsertRawAudioFrame, audio_sender_,
- audio_frame, recorded_time, callback));
+ virtual void InsertAudio(const AudioBus* audio_bus,
+ const base::TimeTicks& recorded_time,
+ const base::Closure& done_callback) OVERRIDE {
+ cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
+ base::Bind(&AudioSender::InsertAudio, audio_sender_,
+ audio_bus, recorded_time, done_callback));
}
virtual void InsertCodedAudioFrame(const EncodedAudioFrame* audio_frame,
const base::TimeTicks& recorded_time,
const base::Closure callback) OVERRIDE {
- cast_thread_->PostTask(CastThread::MAIN, FROM_HERE,
+ cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
base::Bind(&AudioSender::InsertCodedAudioFrame, audio_sender_,
audio_frame, recorded_time, callback));
}
+ protected:
+ virtual ~LocalFrameInput() {}
+
private:
- scoped_refptr<CastThread> cast_thread_;
+ friend class base::RefCountedThreadSafe<LocalFrameInput>;
+
+ scoped_refptr<CastEnvironment> cast_environment_;
base::WeakPtr<AudioSender> audio_sender_;
base::WeakPtr<VideoSender> video_sender_;
};
@@ -89,37 +95,35 @@ class LocalFrameInput : public FrameInput {
class LocalCastSenderPacketReceiver : public PacketReceiver {
public:
- LocalCastSenderPacketReceiver(scoped_refptr<CastThread> cast_thread,
+ LocalCastSenderPacketReceiver(scoped_refptr<CastEnvironment> cast_environment,
base::WeakPtr<AudioSender> audio_sender,
base::WeakPtr<VideoSender> video_sender,
uint32 ssrc_of_audio_sender,
uint32 ssrc_of_video_sender)
- : cast_thread_(cast_thread),
- audio_sender_(audio_sender),
- video_sender_(video_sender),
- ssrc_of_audio_sender_(ssrc_of_audio_sender),
- ssrc_of_video_sender_(ssrc_of_video_sender) {}
-
- virtual ~LocalCastSenderPacketReceiver() {}
+ : cast_environment_(cast_environment),
+ audio_sender_(audio_sender),
+ video_sender_(video_sender),
+ ssrc_of_audio_sender_(ssrc_of_audio_sender),
+ ssrc_of_video_sender_(ssrc_of_video_sender) {}
virtual void ReceivedPacket(const uint8* packet,
- int length,
+ size_t length,
const base::Closure callback) OVERRIDE {
if (!Rtcp::IsRtcpPacket(packet, length)) {
// We should have no incoming RTP packets.
// No action; just log and call the callback informing that we are done
// with the packet.
VLOG(1) << "Unexpectedly received a RTP packet in the cast sender";
- cast_thread_->PostTask(CastThread::MAIN, FROM_HERE, callback);
+ cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE, callback);
return;
}
uint32 ssrc_of_sender = Rtcp::GetSsrcOfSender(packet, length);
if (ssrc_of_sender == ssrc_of_audio_sender_) {
- cast_thread_->PostTask(CastThread::MAIN, FROM_HERE,
+ cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
base::Bind(&AudioSender::IncomingRtcpPacket, audio_sender_,
packet, length, callback));
} else if (ssrc_of_sender == ssrc_of_video_sender_) {
- cast_thread_->PostTask(CastThread::MAIN, FROM_HERE,
+ cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
base::Bind(&VideoSender::IncomingRtcpPacket, video_sender_,
packet, length, callback));
} else {
@@ -128,25 +132,30 @@ class LocalCastSenderPacketReceiver : public PacketReceiver {
VLOG(1) << "Received a RTCP packet with a non matching sender SSRC "
<< ssrc_of_sender;
- cast_thread_->PostTask(CastThread::MAIN, FROM_HERE, callback);
+ cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE, callback);
}
}
+ protected:
+ virtual ~LocalCastSenderPacketReceiver() {}
+
private:
- scoped_refptr<CastThread> cast_thread_;
+ friend class base::RefCountedThreadSafe<LocalCastSenderPacketReceiver>;
+
+ scoped_refptr<CastEnvironment> cast_environment_;
base::WeakPtr<AudioSender> audio_sender_;
base::WeakPtr<VideoSender> video_sender_;
- uint32 ssrc_of_audio_sender_;
- uint32 ssrc_of_video_sender_;
+ const uint32 ssrc_of_audio_sender_;
+ const uint32 ssrc_of_video_sender_;
};
CastSender* CastSender::CreateCastSender(
- scoped_refptr<CastThread> cast_thread,
+ scoped_refptr<CastEnvironment> cast_environment,
const AudioSenderConfig& audio_config,
const VideoSenderConfig& video_config,
VideoEncoderController* const video_encoder_controller,
PacketSender* const packet_sender) {
- return new CastSenderImpl(cast_thread,
+ return new CastSenderImpl(cast_environment,
audio_config,
video_config,
video_encoder_controller,
@@ -154,23 +163,32 @@ CastSender* CastSender::CreateCastSender(
}
CastSenderImpl::CastSenderImpl(
- scoped_refptr<CastThread> cast_thread,
+ scoped_refptr<CastEnvironment> cast_environment,
const AudioSenderConfig& audio_config,
const VideoSenderConfig& video_config,
VideoEncoderController* const video_encoder_controller,
PacketSender* const packet_sender)
- : pacer_(cast_thread, packet_sender),
- audio_sender_(cast_thread, audio_config, &pacer_),
- video_sender_(cast_thread, video_config, video_encoder_controller,
+ : pacer_(cast_environment, packet_sender),
+ audio_sender_(cast_environment, audio_config, &pacer_),
+ video_sender_(cast_environment, video_config, video_encoder_controller,
&pacer_),
- frame_input_(new LocalFrameInput(cast_thread, audio_sender_.AsWeakPtr(),
+ frame_input_(new LocalFrameInput(cast_environment,
+ audio_sender_.AsWeakPtr(),
video_sender_.AsWeakPtr())),
- packet_receiver_(new LocalCastSenderPacketReceiver(cast_thread,
+ packet_receiver_(new LocalCastSenderPacketReceiver(cast_environment,
audio_sender_.AsWeakPtr(), video_sender_.AsWeakPtr(),
audio_config.incoming_feedback_ssrc,
video_config.incoming_feedback_ssrc)) {}
CastSenderImpl::~CastSenderImpl() {}
+scoped_refptr<FrameInput> CastSenderImpl::frame_input() {
+ return frame_input_;
+}
+
+scoped_refptr<PacketReceiver> CastSenderImpl::packet_receiver() {
+ return packet_receiver_;
+}
+
} // namespace cast
} // namespace media
diff --git a/chromium/media/cast/cast_sender_impl.h b/chromium/media/cast/cast_sender_impl.h
index eb19caa247b..2c5dd222e1a 100644
--- a/chromium/media/cast/cast_sender_impl.h
+++ b/chromium/media/cast/cast_sender_impl.h
@@ -8,12 +8,16 @@
#include "base/memory/scoped_ptr.h"
#include "media/cast/audio_sender/audio_sender.h"
#include "media/cast/cast_config.h"
+#include "media/cast/cast_environment.h"
#include "media/cast/cast_sender.h"
-#include "media/cast/cast_thread.h"
-#include "media/cast/pacing/paced_sender.h"
+#include "media/cast/net/pacing/paced_sender.h"
#include "media/cast/video_sender/video_sender.h"
namespace media {
+ class VideoFrame;
+}
+
+namespace media {
namespace cast {
class AudioSender;
@@ -24,7 +28,7 @@ class VideoSender;
// together such as pacer, packet receiver, frame input, audio and video sender.
class CastSenderImpl : public CastSender {
public:
- CastSenderImpl(scoped_refptr<CastThread> cast_thread,
+ CastSenderImpl(scoped_refptr<CastEnvironment> cast_environment,
const AudioSenderConfig& audio_config,
const VideoSenderConfig& video_config,
VideoEncoderController* const video_encoder_controller,
@@ -32,13 +36,8 @@ class CastSenderImpl : public CastSender {
virtual ~CastSenderImpl();
- virtual scoped_refptr<FrameInput> frame_input() OVERRIDE {
- return frame_input_;
- }
-
- virtual scoped_refptr<PacketReceiver> packet_receiver() OVERRIDE {
- return packet_receiver_;
- }
+ virtual scoped_refptr<FrameInput> frame_input() OVERRIDE;
+ virtual scoped_refptr<PacketReceiver> packet_receiver() OVERRIDE;
private:
PacedSender pacer_;
diff --git a/chromium/media/cast/cast_thread.cc b/chromium/media/cast/cast_thread.cc
deleted file mode 100644
index 4d294c46568..00000000000
--- a/chromium/media/cast/cast_thread.cc
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/cast/cast_thread.h"
-
-#include "base/logging.h"
-
-using base::TaskRunner;
-
-namespace media {
-namespace cast {
-
-CastThread::CastThread(
- scoped_refptr<TaskRunner> main_thread_proxy,
- scoped_refptr<TaskRunner> audio_encode_thread_proxy,
- scoped_refptr<TaskRunner> audio_decode_thread_proxy,
- scoped_refptr<TaskRunner> video_encode_thread_proxy,
- scoped_refptr<TaskRunner> video_decode_thread_proxy)
- : main_thread_proxy_(main_thread_proxy),
- audio_encode_thread_proxy_(audio_encode_thread_proxy),
- audio_decode_thread_proxy_(audio_decode_thread_proxy),
- video_encode_thread_proxy_(video_encode_thread_proxy),
- video_decode_thread_proxy_(video_decode_thread_proxy) {
- DCHECK(main_thread_proxy) << "Main thread required";
-}
-
-bool CastThread::PostTask(ThreadId identifier,
- const tracked_objects::Location& from_here,
- const base::Closure& task) {
- scoped_refptr<TaskRunner> task_runner =
- GetMessageTaskRunnerForThread(identifier);
-
- return task_runner->PostTask(from_here, task);
-}
-
-bool CastThread::PostDelayedTask(ThreadId identifier,
- const tracked_objects::Location& from_here,
- const base::Closure& task,
- base::TimeDelta delay) {
- scoped_refptr<TaskRunner> task_runner =
- GetMessageTaskRunnerForThread(identifier);
-
- return task_runner->PostDelayedTask(from_here, task, delay);
-}
-
-scoped_refptr<TaskRunner> CastThread::GetMessageTaskRunnerForThread(
- ThreadId identifier) {
- switch (identifier) {
- case CastThread::MAIN:
- return main_thread_proxy_;
- case CastThread::AUDIO_ENCODER:
- return audio_encode_thread_proxy_;
- case CastThread::AUDIO_DECODER:
- return audio_decode_thread_proxy_;
- case CastThread::VIDEO_ENCODER:
- return video_encode_thread_proxy_;
- case CastThread::VIDEO_DECODER:
- return video_decode_thread_proxy_;
- }
-}
-
-} // namespace cast
-} // namespace media
diff --git a/chromium/media/cast/congestion_control/congestion_control.cc b/chromium/media/cast/congestion_control/congestion_control.cc
index f8ca98c2c9a..35687e7477a 100644
--- a/chromium/media/cast/congestion_control/congestion_control.cc
+++ b/chromium/media/cast/congestion_control/congestion_control.cc
@@ -20,16 +20,16 @@ static const int kCongestionControlMaxBitrateIncreasePerMillisecond = 1200;
static const int64 kMaxElapsedTimeMs = kCongestionControlMaxChangeIntervalMs;
-CongestionControl::CongestionControl(float congestion_control_back_off,
+CongestionControl::CongestionControl(base::TickClock* clock,
+ float congestion_control_back_off,
uint32 max_bitrate_configured,
uint32 min_bitrate_configured,
uint32 start_bitrate)
- : congestion_control_back_off_(congestion_control_back_off),
+ : clock_(clock),
+ congestion_control_back_off_(congestion_control_back_off),
max_bitrate_configured_(max_bitrate_configured),
min_bitrate_configured_(min_bitrate_configured),
- bitrate_(start_bitrate),
- default_tick_clock_(new base::DefaultTickClock()),
- clock_(default_tick_clock_.get()) {
+ bitrate_(start_bitrate) {
DCHECK_GT(congestion_control_back_off, 0.0f) << "Invalid config";
DCHECK_LT(congestion_control_back_off, 1.0f) << "Invalid config";
DCHECK_GE(max_bitrate_configured, min_bitrate_configured) << "Invalid config";
@@ -37,6 +37,9 @@ CongestionControl::CongestionControl(float congestion_control_back_off,
DCHECK_GE(start_bitrate, min_bitrate_configured) << "Invalid config";
}
+CongestionControl::~CongestionControl() {
+}
+
bool CongestionControl::OnAck(base::TimeDelta rtt, uint32* new_bitrate) {
base::TimeTicks now = clock_->NowTicks();
diff --git a/chromium/media/cast/congestion_control/congestion_control.gypi b/chromium/media/cast/congestion_control/congestion_control.gypi
index 9f1accf3f27..20a57ca2a30 100644
--- a/chromium/media/cast/congestion_control/congestion_control.gypi
+++ b/chromium/media/cast/congestion_control/congestion_control.gypi
@@ -16,7 +16,6 @@
], # source
'dependencies': [
'<(DEPTH)/base/base.gyp:base',
- '<(DEPTH)/base/base.gyp:test_support_base',
],
},
],
diff --git a/chromium/media/cast/congestion_control/congestion_control.h b/chromium/media/cast/congestion_control/congestion_control.h
index f1b9b280dcc..df88151eb8f 100644
--- a/chromium/media/cast/congestion_control/congestion_control.h
+++ b/chromium/media/cast/congestion_control/congestion_control.h
@@ -7,7 +7,6 @@
#include "base/basictypes.h"
#include "base/memory/scoped_ptr.h"
-#include "base/time/default_tick_clock.h"
#include "base/time/tick_clock.h"
#include "base/time/time.h"
@@ -16,12 +15,13 @@ namespace cast {
class CongestionControl {
public:
- CongestionControl(float congestion_control_back_off,
+ CongestionControl(base::TickClock* clock,
+ float congestion_control_back_off,
uint32 max_bitrate_configured,
uint32 min_bitrate_configured,
uint32 start_bitrate);
- virtual ~CongestionControl() {}
+ virtual ~CongestionControl();
// Don't call OnAck if the same message contain a NACK.
// Returns true if the bitrate have changed.
@@ -30,11 +30,9 @@ class CongestionControl {
// Returns true if the bitrate have changed.
bool OnNack(base::TimeDelta rtt_ms, uint32* new_bitrate);
- void set_clock(base::TickClock* clock) {
- clock_ = clock;
- }
private:
+ base::TickClock* const clock_; // Not owned by this class.
const float congestion_control_back_off_;
const uint32 max_bitrate_configured_;
const uint32 min_bitrate_configured_;
@@ -42,13 +40,10 @@ class CongestionControl {
base::TimeTicks time_last_increase_;
base::TimeTicks time_last_decrease_;
- scoped_ptr<base::TickClock> default_tick_clock_;
- base::TickClock* clock_;
-
DISALLOW_COPY_AND_ASSIGN(CongestionControl);
};
} // namespace cast
} // namespace media
-#endif // MEDIA_CAST_CONGESTION_CONTROL_CONGESTION_CONTROL_H_
+#endif // MEDIA_CAST_CONGESTION_CONTROL_CONGESTION_CONTROL_H_
diff --git a/chromium/media/cast/congestion_control/congestion_control_unittest.cc b/chromium/media/cast/congestion_control/congestion_control_unittest.cc
index eff0a8c1e6f..108d2b340b7 100644
--- a/chromium/media/cast/congestion_control/congestion_control_unittest.cc
+++ b/chromium/media/cast/congestion_control/congestion_control_unittest.cc
@@ -13,21 +13,39 @@ namespace cast {
static const uint32 kMaxBitrateConfigured = 5000000;
static const uint32 kMinBitrateConfigured = 500000;
static const uint32 kStartBitrate = 2000000;
-static const int64 kStartMillisecond = 123456789;
+static const int64 kStartMillisecond = GG_INT64_C(12345678900000);
static const int64 kRttMs = 20;
static const int64 kAckRateMs = 33;
-static const int64 kNackRateMs = 10;
class CongestionControlTest : public ::testing::Test {
protected:
CongestionControlTest()
- : congestion_control_(kDefaultCongestionControlBackOff,
+ : congestion_control_(&testing_clock_,
+ kDefaultCongestionControlBackOff,
kMaxBitrateConfigured,
kMinBitrateConfigured,
kStartBitrate) {
testing_clock_.Advance(
base::TimeDelta::FromMilliseconds(kStartMillisecond));
- congestion_control_.set_clock(&testing_clock_);
+ }
+
+ // Returns the last bitrate of the run.
+ uint32 RunWithOneLossEventPerSecond(int fps, int rtt_ms,
+ int runtime_in_seconds) {
+ const base::TimeDelta rtt = base::TimeDelta::FromMilliseconds(rtt_ms);
+ const base::TimeDelta ack_rate =
+ base::TimeDelta::FromMilliseconds(GG_INT64_C(1000) / fps);
+ uint32 new_bitrate = 0;
+ EXPECT_FALSE(congestion_control_.OnAck(rtt, &new_bitrate));
+
+ for (int seconds = 0; seconds < runtime_in_seconds; ++seconds) {
+ for (int i = 1; i < fps; ++i) {
+ testing_clock_.Advance(ack_rate);
+ congestion_control_.OnAck(rtt, &new_bitrate);
+ }
+ EXPECT_TRUE(congestion_control_.OnNack(rtt, &new_bitrate));
+ }
+ return new_bitrate;
}
base::SimpleTestTickClock testing_clock_;
@@ -36,8 +54,9 @@ class CongestionControlTest : public ::testing::Test {
TEST_F(CongestionControlTest, Max) {
uint32 new_bitrate = 0;
- base::TimeDelta rtt = base::TimeDelta::FromMilliseconds(kRttMs);
- base::TimeDelta ack_rate = base::TimeDelta::FromMilliseconds(kAckRateMs);
+ const base::TimeDelta rtt = base::TimeDelta::FromMilliseconds(kRttMs);
+ const base::TimeDelta ack_rate =
+ base::TimeDelta::FromMilliseconds(kAckRateMs);
EXPECT_FALSE(congestion_control_.OnAck(rtt, &new_bitrate));
uint32 expected_increase_bitrate = 0;
@@ -56,8 +75,9 @@ TEST_F(CongestionControlTest, Max) {
TEST_F(CongestionControlTest, Min) {
uint32 new_bitrate = 0;
- base::TimeDelta rtt = base::TimeDelta::FromMilliseconds(kRttMs);
- base::TimeDelta ack_rate = base::TimeDelta::FromMilliseconds(kAckRateMs);
+ const base::TimeDelta rtt = base::TimeDelta::FromMilliseconds(kRttMs);
+ const base::TimeDelta ack_rate =
+ base::TimeDelta::FromMilliseconds(kAckRateMs);
EXPECT_FALSE(congestion_control_.OnNack(rtt, &new_bitrate));
uint32 expected_decrease_bitrate = kStartBitrate;
@@ -65,19 +85,20 @@ TEST_F(CongestionControlTest, Min) {
// Expected number is 10. 2000 * 0.875^10 <= 500.
for (int i = 0; i < 10; ++i) {
testing_clock_.Advance(ack_rate);
- EXPECT_TRUE(congestion_control_.OnNack(rtt, &new_bitrate));
- expected_decrease_bitrate = static_cast<uint32>(
- expected_decrease_bitrate * kDefaultCongestionControlBackOff);
- EXPECT_EQ(expected_decrease_bitrate, new_bitrate);
- }
- testing_clock_.Advance(ack_rate);
- EXPECT_TRUE(congestion_control_.OnNack(rtt, &new_bitrate));
- EXPECT_EQ(kMinBitrateConfigured, new_bitrate);
+ EXPECT_TRUE(congestion_control_.OnNack(rtt, &new_bitrate));
+ expected_decrease_bitrate = static_cast<uint32>(
+ expected_decrease_bitrate * kDefaultCongestionControlBackOff);
+ EXPECT_EQ(expected_decrease_bitrate, new_bitrate);
+ }
+ testing_clock_.Advance(ack_rate);
+ EXPECT_TRUE(congestion_control_.OnNack(rtt, &new_bitrate));
+ EXPECT_EQ(kMinBitrateConfigured, new_bitrate);
}
TEST_F(CongestionControlTest, Timing) {
- base::TimeDelta rtt = base::TimeDelta::FromMilliseconds(kRttMs);
- base::TimeDelta ack_rate = base::TimeDelta::FromMilliseconds(kAckRateMs);
+ const base::TimeDelta rtt = base::TimeDelta::FromMilliseconds(kRttMs);
+ const base::TimeDelta ack_rate =
+ base::TimeDelta::FromMilliseconds(kAckRateMs);
uint32 new_bitrate = 0;
uint32 expected_bitrate = kStartBitrate;
@@ -135,5 +156,25 @@ TEST_F(CongestionControlTest, Timing) {
EXPECT_EQ(expected_bitrate, new_bitrate);
}
+TEST_F(CongestionControlTest, Convergence24fps) {
+ EXPECT_GE(RunWithOneLossEventPerSecond(24, kRttMs, 100),
+ GG_UINT32_C(3000000));
+}
+
+TEST_F(CongestionControlTest, Convergence24fpsLongRtt) {
+ EXPECT_GE(RunWithOneLossEventPerSecond(24, 100, 100),
+ GG_UINT32_C(500000));
+}
+
+TEST_F(CongestionControlTest, Convergence60fps) {
+ EXPECT_GE(RunWithOneLossEventPerSecond(60, kRttMs, 100),
+ GG_UINT32_C(3500000));
+}
+
+TEST_F(CongestionControlTest, Convergence60fpsLongRtt) {
+ EXPECT_GE(RunWithOneLossEventPerSecond(60, 100, 100),
+ GG_UINT32_C(500000));
+}
+
} // namespace cast
} // namespace media
diff --git a/chromium/media/cast/framer/cast_message_builder.cc b/chromium/media/cast/framer/cast_message_builder.cc
index eec12112e03..7d89f744315 100644
--- a/chromium/media/cast/framer/cast_message_builder.cc
+++ b/chromium/media/cast/framer/cast_message_builder.cc
@@ -4,18 +4,20 @@
#include "media/cast/framer/cast_message_builder.h"
+#include "media/cast/cast_defines.h"
+
namespace media {
namespace cast {
-static const uint16 kCompleteFrameLost = 0xffff;
-
CastMessageBuilder::CastMessageBuilder(
+ base::TickClock* clock,
RtpPayloadFeedback* incoming_payload_feedback,
FrameIdMap* frame_id_map,
uint32 media_ssrc,
bool decoder_faster_than_max_frame_rate,
int max_unacked_frames)
- : cast_feedback_(incoming_payload_feedback),
+ : clock_(clock),
+ cast_feedback_(incoming_payload_feedback),
frame_id_map_(frame_id_map),
media_ssrc_(media_ssrc),
decoder_faster_than_max_frame_rate_(decoder_faster_than_max_frame_rate),
@@ -24,15 +26,13 @@ CastMessageBuilder::CastMessageBuilder(
waiting_for_key_frame_(true),
slowing_down_ack_(false),
acked_last_frame_(true),
- last_acked_frame_id_(kStartFrameId),
- default_tick_clock_(new base::DefaultTickClock()),
- clock_(default_tick_clock_.get()) {
+ last_acked_frame_id_(kStartFrameId) {
cast_msg_.ack_frame_id_ = kStartFrameId;
}
CastMessageBuilder::~CastMessageBuilder() {}
-void CastMessageBuilder::CompleteFrameReceived(uint8 frame_id,
+void CastMessageBuilder::CompleteFrameReceived(uint32 frame_id,
bool is_key_frame) {
if (last_update_time_.is_null()) {
// Our first update.
@@ -52,12 +52,12 @@ void CastMessageBuilder::CompleteFrameReceived(uint8 frame_id,
// packet in the key-frame.
UpdateAckMessage();
} else {
- if (!UpdateAckMessage())
- return;
+ if (!UpdateAckMessage()) return;
BuildPacketList();
}
// Send cast message.
+ VLOG(1) << "Send cast message Ack:" << static_cast<int>(frame_id);
cast_feedback_->CastFeedback(cast_msg_);
}
@@ -83,7 +83,7 @@ bool CastMessageBuilder::UpdateAckMessage() {
// time; and it's not needed since we can skip frames to catch up.
}
} else {
- uint8 frame_id = frame_id_map_->LastContinuousFrame();
+ uint32 frame_id = frame_id_map_->LastContinuousFrame();
// Is it a new frame?
if (last_acked_frame_id_ == frame_id) return false;
@@ -153,9 +153,8 @@ void CastMessageBuilder::BuildPacketList() {
// Are we missing packets?
if (frame_id_map_->Empty()) return;
- uint8 newest_frame_id = frame_id_map_->NewestFrameId();
- uint8 next_expected_frame_id =
- static_cast<uint8>(cast_msg_.ack_frame_id_ + 1);
+ uint32 newest_frame_id = frame_id_map_->NewestFrameId();
+ uint32 next_expected_frame_id = cast_msg_.ack_frame_id_ + 1;
// Iterate over all frames.
for (; !IsNewerFrameId(next_expected_frame_id, newest_frame_id);
@@ -183,7 +182,7 @@ void CastMessageBuilder::BuildPacketList() {
}
} else {
time_last_nacked_map_[next_expected_frame_id] = now;
- missing.insert(kCompleteFrameLost);
+ missing.insert(kRtcpCastAllPacketsLost);
cast_msg_.missing_frames_and_packets_[next_expected_frame_id] = missing;
}
}
diff --git a/chromium/media/cast/framer/cast_message_builder.h b/chromium/media/cast/framer/cast_message_builder.h
index b941178b633..b76a196111c 100644
--- a/chromium/media/cast/framer/cast_message_builder.h
+++ b/chromium/media/cast/framer/cast_message_builder.h
@@ -11,38 +11,36 @@
#include "media/cast/framer/frame_id_map.h"
#include "media/cast/rtcp/rtcp.h"
-#include "media/cast/rtp_common/rtp_defines.h"
+#include "media/cast/rtp_receiver/rtp_receiver_defines.h"
namespace media {
namespace cast {
class RtpPayloadFeedback;
-typedef std::map<uint8, base::TimeTicks> TimeLastNackMap;
+typedef std::map<uint32, base::TimeTicks> TimeLastNackMap;
class CastMessageBuilder {
public:
- CastMessageBuilder(RtpPayloadFeedback* incoming_payload_feedback,
+ CastMessageBuilder(base::TickClock* clock,
+ RtpPayloadFeedback* incoming_payload_feedback,
FrameIdMap* frame_id_map,
uint32 media_ssrc,
bool decoder_faster_than_max_frame_rate,
int max_unacked_frames);
~CastMessageBuilder();
- void CompleteFrameReceived(uint8 frame_id, bool is_key_frame);
+ void CompleteFrameReceived(uint32 frame_id, bool is_key_frame);
bool TimeToSendNextCastMessage(base::TimeTicks* time_to_send);
void UpdateCastMessage();
void Reset();
- void set_clock(base::TickClock* clock) {
- clock_ = clock;
- }
-
private:
bool UpdateAckMessage();
void BuildPacketList();
bool UpdateCastMessageInternal(RtcpCastMessage* message);
+ base::TickClock* const clock_; // Not owned by this class.
RtpPayloadFeedback* const cast_feedback_;
// CastMessageBuilder has only const access to the frame id mapper.
@@ -59,10 +57,7 @@ class CastMessageBuilder {
bool slowing_down_ack_;
bool acked_last_frame_;
- uint8 last_acked_frame_id_;
-
- scoped_ptr<base::TickClock> default_tick_clock_;
- base::TickClock* clock_;
+ uint32 last_acked_frame_id_;
DISALLOW_COPY_AND_ASSIGN(CastMessageBuilder);
};
diff --git a/chromium/media/cast/framer/cast_message_builder_unittest.cc b/chromium/media/cast/framer/cast_message_builder_unittest.cc
index f9bb0668d82..f4b708c90ef 100644
--- a/chromium/media/cast/framer/cast_message_builder_unittest.cc
+++ b/chromium/media/cast/framer/cast_message_builder_unittest.cc
@@ -6,7 +6,7 @@
#include "base/test/simple_test_tick_clock.h"
#include "media/cast/framer/cast_message_builder.h"
#include "media/cast/rtcp/rtcp.h"
-#include "media/cast/rtp_common/rtp_defines.h"
+#include "media/cast/rtp_receiver/rtp_receiver_defines.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
@@ -15,9 +15,10 @@ namespace cast {
static const uint32 kSsrc = 0x1234;
static const uint32 kShortTimeIncrementMs = 10;
static const uint32 kLongTimeIncrementMs = 40;
-static const int64 kStartMillisecond = 123456789;
+static const int64 kStartMillisecond = GG_INT64_C(12345678900000);
-typedef std::map<uint8, int> MissingPacketsMap;
+namespace {
+typedef std::map<uint32, size_t> MissingPacketsMap;
class NackFeedbackVerification : public RtpPayloadFeedback {
public:
@@ -38,14 +39,21 @@ class NackFeedbackVerification : public RtpPayloadFeedback {
// Keep track of the number of missing packets per frame.
missing_packets_.clear();
while (frame_it != cast_feedback.missing_frames_and_packets_.end()) {
+ // Check for complete frame lost.
+ if ((frame_it->second.size() == 1) &&
+ (*frame_it->second.begin() == kRtcpCastAllPacketsLost)) {
+ missing_packets_.insert(
+ std::make_pair(frame_it->first, kRtcpCastAllPacketsLost));
+ } else {
missing_packets_.insert(
std::make_pair(frame_it->first, frame_it->second.size()));
+ }
++frame_it;
}
triggered_ = true;
}
- int num_missing_packets(uint8 frame_id) {
+ size_t num_missing_packets(uint32 frame_id) {
MissingPacketsMap::iterator it;
it = missing_packets_.find(frame_id);
if (it == missing_packets_.end()) return 0;
@@ -60,18 +68,20 @@ class NackFeedbackVerification : public RtpPayloadFeedback {
return ret_val;
}
- uint8 last_frame_acked() { return last_frame_acked_; }
+ uint32 last_frame_acked() { return last_frame_acked_; }
private:
bool triggered_;
MissingPacketsMap missing_packets_; // Missing packets per frame.
- uint8 last_frame_acked_;
+ uint32 last_frame_acked_;
};
+} // namespace
class CastMessageBuilderTest : public ::testing::Test {
protected:
CastMessageBuilderTest()
- : cast_msg_builder_(new CastMessageBuilder(&feedback_,
+ : cast_msg_builder_(new CastMessageBuilder(&testing_clock_,
+ &feedback_,
&frame_id_map_,
kSsrc,
true,
@@ -80,12 +90,11 @@ class CastMessageBuilderTest : public ::testing::Test {
rtp_header_.is_key_frame = false;
testing_clock_.Advance(
base::TimeDelta::FromMilliseconds(kStartMillisecond));
- cast_msg_builder_->set_clock(&testing_clock_);
}
- ~CastMessageBuilderTest() {}
+ virtual ~CastMessageBuilderTest() {}
- void SetFrameId(uint8 frame_id) {
+ void SetFrameId(uint32 frame_id) {
rtp_header_.frame_id = frame_id;
}
@@ -101,7 +110,7 @@ class CastMessageBuilderTest : public ::testing::Test {
rtp_header_.is_key_frame = is_key;
}
- void SetReferenceFrameId(uint8 reference_frame_id) {
+ void SetReferenceFrameId(uint32 reference_frame_id) {
rtp_header_.is_reference = true;
rtp_header_.reference_frame_id = reference_frame_id;
}
@@ -117,7 +126,8 @@ class CastMessageBuilderTest : public ::testing::Test {
}
void SetDecoderSlowerThanMaxFrameRate(int max_unacked_frames) {
- cast_msg_builder_.reset(new CastMessageBuilder(&feedback_,
+ cast_msg_builder_.reset(new CastMessageBuilder(&testing_clock_,
+ &feedback_,
&frame_id_map_,
kSsrc,
false,
@@ -148,7 +158,7 @@ TEST_F(CastMessageBuilderTest, StartWithAKeyFrame) {
base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
cast_msg_builder_->UpdateCastMessage();
EXPECT_TRUE(feedback_.triggered());
- EXPECT_EQ(5, feedback_.last_frame_acked());
+ EXPECT_EQ(5u, feedback_.last_frame_acked());
}
TEST_F(CastMessageBuilderTest, OneFrameNackList) {
@@ -164,11 +174,10 @@ TEST_F(CastMessageBuilderTest, OneFrameNackList) {
SetPacketId(5);
InsertPacket();
EXPECT_TRUE(feedback_.triggered());
- EXPECT_EQ(4, feedback_.num_missing_packets(0));
+ EXPECT_EQ(4u, feedback_.num_missing_packets(0));
}
TEST_F(CastMessageBuilderTest, CompleteFrameMissing) {
- // TODO(mikhal): Add indication.
SetFrameId(0);
SetPacketId(2);
SetMaxPacketId(5);
@@ -179,6 +188,8 @@ TEST_F(CastMessageBuilderTest, CompleteFrameMissing) {
SetPacketId(2);
SetMaxPacketId(5);
InsertPacket();
+ EXPECT_TRUE(feedback_.triggered());
+ EXPECT_EQ(kRtcpCastAllPacketsLost, feedback_.num_missing_packets(1));
}
TEST_F(CastMessageBuilderTest, FastForwardAck) {
@@ -194,7 +205,7 @@ TEST_F(CastMessageBuilderTest, FastForwardAck) {
SetMaxPacketId(0);
InsertPacket();
EXPECT_TRUE(feedback_.triggered());
- EXPECT_EQ(255, feedback_.last_frame_acked());
+ EXPECT_EQ(kStartFrameId, feedback_.last_frame_acked());
testing_clock_.Advance(
base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
SetFrameId(0);
@@ -203,7 +214,7 @@ TEST_F(CastMessageBuilderTest, FastForwardAck) {
SetKeyFrame(true);
InsertPacket();
EXPECT_TRUE(feedback_.triggered());
- EXPECT_EQ(2, feedback_.last_frame_acked());
+ EXPECT_EQ(2u, feedback_.last_frame_acked());
}
TEST_F(CastMessageBuilderTest, RemoveOldFrames) {
@@ -226,7 +237,7 @@ TEST_F(CastMessageBuilderTest, RemoveOldFrames) {
SetMaxPacketId(5);
InsertPacket();
EXPECT_TRUE(feedback_.triggered());
- EXPECT_EQ(255, feedback_.last_frame_acked());
+ EXPECT_EQ(kStartFrameId, feedback_.last_frame_acked());
testing_clock_.Advance(
base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
SetFrameId(5);
@@ -239,7 +250,7 @@ TEST_F(CastMessageBuilderTest, RemoveOldFrames) {
frame_id_map_.RemoveOldFrames(5); // Simulate 5 being pulled for rendering.
cast_msg_builder_->UpdateCastMessage();
EXPECT_TRUE(feedback_.triggered());
- EXPECT_EQ(5, feedback_.last_frame_acked());
+ EXPECT_EQ(5u, feedback_.last_frame_acked());
testing_clock_.Advance(
base::TimeDelta::FromMilliseconds(kShortTimeIncrementMs));
SetFrameId(1);
@@ -251,7 +262,7 @@ TEST_F(CastMessageBuilderTest, RemoveOldFrames) {
base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
InsertPacket();
EXPECT_TRUE(feedback_.triggered());
- EXPECT_EQ(5, feedback_.last_frame_acked());
+ EXPECT_EQ(5u, feedback_.last_frame_acked());
}
TEST_F(CastMessageBuilderTest, WrapFastForward) {
@@ -269,16 +280,16 @@ TEST_F(CastMessageBuilderTest, WrapFastForward) {
SetKeyFrame(false);
InsertPacket();
EXPECT_TRUE(feedback_.triggered());
- EXPECT_EQ(253, feedback_.last_frame_acked());
+ EXPECT_EQ(253u, feedback_.last_frame_acked());
testing_clock_.Advance(
base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
- SetFrameId(0);
+ SetFrameId(256);
SetPacketId(0);
SetMaxPacketId(0);
SetKeyFrame(false);
InsertPacket();
EXPECT_TRUE(feedback_.triggered());
- EXPECT_EQ(253, feedback_.last_frame_acked());
+ EXPECT_EQ(253u, feedback_.last_frame_acked());
testing_clock_.Advance(
base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
SetFrameId(254);
@@ -287,7 +298,7 @@ TEST_F(CastMessageBuilderTest, WrapFastForward) {
SetKeyFrame(true);
InsertPacket();
EXPECT_TRUE(feedback_.triggered());
- EXPECT_EQ(0, feedback_.last_frame_acked());
+ EXPECT_EQ(256u, feedback_.last_frame_acked());
}
TEST_F(CastMessageBuilderTest, NackUntilMaxReceivedPacket) {
@@ -301,7 +312,7 @@ TEST_F(CastMessageBuilderTest, NackUntilMaxReceivedPacket) {
SetPacketId(5);
InsertPacket();
EXPECT_TRUE(feedback_.triggered());
- EXPECT_EQ(4, feedback_.num_missing_packets(0));
+ EXPECT_EQ(4u, feedback_.num_missing_packets(0));
}
TEST_F(CastMessageBuilderTest, NackUntilMaxReceivedPacketNextFrame) {
@@ -317,7 +328,7 @@ TEST_F(CastMessageBuilderTest, NackUntilMaxReceivedPacketNextFrame) {
testing_clock_.Advance(
base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
EXPECT_TRUE(feedback_.triggered());
- EXPECT_EQ(4, feedback_.num_missing_packets(0));
+ EXPECT_EQ(4u, feedback_.num_missing_packets(0));
SetFrameId(1);
SetMaxPacketId(2);
SetPacketId(0);
@@ -326,7 +337,7 @@ TEST_F(CastMessageBuilderTest, NackUntilMaxReceivedPacketNextFrame) {
testing_clock_.Advance(
base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
EXPECT_TRUE(feedback_.triggered());
- EXPECT_EQ(21 - 2, feedback_.num_missing_packets(0));
+ EXPECT_EQ(19u, feedback_.num_missing_packets(0));
}
TEST_F(CastMessageBuilderTest, NackUntilMaxReceivedPacketNextKey) {
@@ -342,7 +353,7 @@ TEST_F(CastMessageBuilderTest, NackUntilMaxReceivedPacketNextKey) {
testing_clock_.Advance(
base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
EXPECT_TRUE(feedback_.triggered());
- EXPECT_EQ(4, feedback_.num_missing_packets(0));
+ EXPECT_EQ(4u, feedback_.num_missing_packets(0));
SetFrameId(1);
SetMaxPacketId(0);
SetPacketId(0);
@@ -351,7 +362,7 @@ TEST_F(CastMessageBuilderTest, NackUntilMaxReceivedPacketNextKey) {
testing_clock_.Advance(
base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
EXPECT_TRUE(feedback_.triggered());
- EXPECT_EQ(0, feedback_.num_missing_packets(0));
+ EXPECT_EQ(0u, feedback_.num_missing_packets(0));
}
TEST_F(CastMessageBuilderTest, Reset) {
@@ -363,7 +374,7 @@ TEST_F(CastMessageBuilderTest, Reset) {
// Should reset nack list state and request a key frame.
cast_msg_builder_->UpdateCastMessage();
EXPECT_TRUE(feedback_.triggered());
- EXPECT_EQ(0, feedback_.num_missing_packets(0));
+ EXPECT_EQ(0u, feedback_.num_missing_packets(0));
}
TEST_F(CastMessageBuilderTest, DeltaAfterReset) {
@@ -373,7 +384,7 @@ TEST_F(CastMessageBuilderTest, DeltaAfterReset) {
SetKeyFrame(true);
InsertPacket();
EXPECT_TRUE(feedback_.triggered());
- EXPECT_EQ(0, feedback_.num_missing_packets(0));
+ EXPECT_EQ(0u, feedback_.num_missing_packets(0));
testing_clock_.Advance(
base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
cast_msg_builder_->Reset();
@@ -393,19 +404,19 @@ TEST_F(CastMessageBuilderTest, BasicRps) {
testing_clock_.Advance(
base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
EXPECT_TRUE(feedback_.triggered());
- EXPECT_EQ(0, feedback_.last_frame_acked());
+ EXPECT_EQ(0u, feedback_.last_frame_acked());
SetFrameId(3);
SetKeyFrame(false);
SetReferenceFrameId(0);
InsertPacket();
EXPECT_TRUE(feedback_.triggered());
- EXPECT_EQ(0, feedback_.last_frame_acked());
+ EXPECT_EQ(0u, feedback_.last_frame_acked());
testing_clock_.Advance(
base::TimeDelta::FromMilliseconds(kLongTimeIncrementMs));
frame_id_map_.RemoveOldFrames(3); // Simulate 3 being pulled for rendering.
cast_msg_builder_->UpdateCastMessage();
EXPECT_TRUE(feedback_.triggered());
- EXPECT_EQ(3, feedback_.last_frame_acked());
+ EXPECT_EQ(3u, feedback_.last_frame_acked());
}
TEST_F(CastMessageBuilderTest, InOrderRps) {
@@ -418,7 +429,7 @@ TEST_F(CastMessageBuilderTest, InOrderRps) {
testing_clock_.Advance(
base::TimeDelta::FromMilliseconds(kShortTimeIncrementMs));
EXPECT_TRUE(feedback_.triggered());
- EXPECT_EQ(0, feedback_.last_frame_acked());
+ EXPECT_EQ(0u, feedback_.last_frame_acked());
SetFrameId(1);
SetPacketId(0);
SetMaxPacketId(1);
@@ -440,7 +451,7 @@ TEST_F(CastMessageBuilderTest, InOrderRps) {
base::TimeDelta::FromMilliseconds(kShortTimeIncrementMs));
cast_msg_builder_->UpdateCastMessage();
EXPECT_TRUE(feedback_.triggered());
- EXPECT_EQ(3, feedback_.last_frame_acked());
+ EXPECT_EQ(3u, feedback_.last_frame_acked());
// Make an old frame complete - should not trigger an ack.
SetFrameId(1);
SetPacketId(1);
@@ -450,7 +461,7 @@ TEST_F(CastMessageBuilderTest, InOrderRps) {
testing_clock_.Advance(
base::TimeDelta::FromMilliseconds(kShortTimeIncrementMs));
EXPECT_FALSE(feedback_.triggered());
- EXPECT_EQ(3, feedback_.last_frame_acked());
+ EXPECT_EQ(3u, feedback_.last_frame_acked());
}
TEST_F(CastMessageBuilderTest, SlowDownAck) {
@@ -461,7 +472,7 @@ TEST_F(CastMessageBuilderTest, SlowDownAck) {
SetKeyFrame(true);
InsertPacket();
- int frame_id;
+ uint32 frame_id;
testing_clock_.Advance(
base::TimeDelta::FromMilliseconds(kShortTimeIncrementMs));
SetKeyFrame(false);
@@ -474,7 +485,7 @@ TEST_F(CastMessageBuilderTest, SlowDownAck) {
base::TimeDelta::FromMilliseconds(kShortTimeIncrementMs));
}
// We should now have entered the slowdown ACK state.
- uint8_t expected_frame_id = 1;
+ uint32 expected_frame_id = 1;
for (; frame_id < 10; ++frame_id) {
if (frame_id % 2) ++expected_frame_id;
EXPECT_TRUE(feedback_.triggered());
diff --git a/chromium/media/cast/framer/frame_buffer.cc b/chromium/media/cast/framer/frame_buffer.cc
index ed7e11f0ce0..ca9f1dedd28 100644
--- a/chromium/media/cast/framer/frame_buffer.cc
+++ b/chromium/media/cast/framer/frame_buffer.cc
@@ -4,6 +4,8 @@
#include "media/cast/framer/frame_buffer.h"
+#include "base/logging.h"
+
namespace media {
namespace cast {
@@ -19,7 +21,7 @@ FrameBuffer::FrameBuffer()
FrameBuffer::~FrameBuffer() {}
void FrameBuffer::InsertPacket(const uint8* payload_data,
- int payload_size,
+ size_t payload_size,
const RtpCastHeader& rtp_header) {
// Is this the first packet in the frame?
if (packets_.empty()) {
@@ -29,7 +31,7 @@ void FrameBuffer::InsertPacket(const uint8* payload_data,
if (rtp_header.is_reference) {
last_referenced_frame_id_ = rtp_header.reference_frame_id;
} else {
- last_referenced_frame_id_ = static_cast<uint8>(rtp_header.frame_id - 1);
+ last_referenced_frame_id_ = rtp_header.frame_id - 1;
}
rtp_timestamp_ = rtp_header.webrtc.header.timestamp;
@@ -38,7 +40,11 @@ void FrameBuffer::InsertPacket(const uint8* payload_data,
if (rtp_header.frame_id != frame_id_) return;
// Insert every packet only once.
- if (packets_.find(rtp_header.packet_id) != packets_.end()) return;
+ if (packets_.find(rtp_header.packet_id) != packets_.end()) {
+ VLOG(3) << "Packet already received, ignored: frame "
+ << frame_id_ << ", packet " << rtp_header.packet_id;
+ return;
+ }
std::vector<uint8> data;
std::pair<PacketMap::iterator, bool> retval =
diff --git a/chromium/media/cast/framer/frame_buffer.h b/chromium/media/cast/framer/frame_buffer.h
index d2b52cb409e..b99f2b2582d 100644
--- a/chromium/media/cast/framer/frame_buffer.h
+++ b/chromium/media/cast/framer/frame_buffer.h
@@ -9,7 +9,7 @@
#include <vector>
#include "media/cast/cast_config.h"
-#include "media/cast/rtp_common/rtp_defines.h"
+#include "media/cast/rtp_receiver/rtp_receiver_defines.h"
namespace media {
namespace cast {
@@ -21,7 +21,7 @@ class FrameBuffer {
FrameBuffer();
~FrameBuffer();
void InsertPacket(const uint8* payload_data,
- int payload_size,
+ size_t payload_size,
const RtpCastHeader& rtp_header);
bool Complete() const;
@@ -32,16 +32,16 @@ class FrameBuffer {
uint32* rtp_timestamp) const;
bool is_key_frame() const { return is_key_frame_; }
- uint8 frame_id() const { return frame_id_; }
- uint8 last_referenced_frame_id() const { return last_referenced_frame_id_; }
+
+ uint32 last_referenced_frame_id() const { return last_referenced_frame_id_; }
private:
- uint8 frame_id_;
+ uint32 frame_id_;
uint16 max_packet_id_;
uint16 num_packets_received_;
bool is_key_frame_;
- int total_data_size_;
- uint8 last_referenced_frame_id_;
+ size_t total_data_size_;
+ uint32 last_referenced_frame_id_;
uint32 rtp_timestamp_;
PacketMap packets_;
diff --git a/chromium/media/cast/framer/frame_buffer_unittest.cc b/chromium/media/cast/framer/frame_buffer_unittest.cc
index 26998f5fd7e..fb14da39f7f 100644
--- a/chromium/media/cast/framer/frame_buffer_unittest.cc
+++ b/chromium/media/cast/framer/frame_buffer_unittest.cc
@@ -12,9 +12,9 @@ class FrameBufferTest : public ::testing::Test {
protected:
FrameBufferTest() {}
- ~FrameBufferTest() {}
+ virtual ~FrameBufferTest() {}
- void SetUp() {
+ virtual void SetUp() {
payload_.assign(kIpPacketSize, 0);
// Build a default one packet frame - populate webrtc header.
diff --git a/chromium/media/cast/framer/frame_id_map.cc b/chromium/media/cast/framer/frame_id_map.cc
index cf866845227..bd9b943371c 100644
--- a/chromium/media/cast/framer/frame_id_map.cc
+++ b/chromium/media/cast/framer/frame_id_map.cc
@@ -5,13 +5,13 @@
#include "media/cast/framer/frame_id_map.h"
#include "base/logging.h"
-#include "media/cast/rtp_common/rtp_defines.h"
+#include "media/cast/rtp_receiver/rtp_receiver_defines.h"
namespace media {
namespace cast {
-FrameInfo::FrameInfo(uint8 frame_id,
- uint8 referenced_frame_id,
+FrameInfo::FrameInfo(uint32 frame_id,
+ uint32 referenced_frame_id,
uint16 max_packet_id,
bool key_frame)
: is_key_frame_(key_frame),
@@ -63,19 +63,23 @@ FrameIdMap::FrameIdMap()
FrameIdMap::~FrameIdMap() {}
bool FrameIdMap::InsertPacket(const RtpCastHeader& rtp_header, bool* complete) {
- uint8 frame_id = rtp_header.frame_id;
- uint8 reference_frame_id;
+ uint32 frame_id = rtp_header.frame_id;
+ uint32 reference_frame_id;
if (rtp_header.is_reference) {
reference_frame_id = rtp_header.reference_frame_id;
} else {
- reference_frame_id = static_cast<uint8>(frame_id - 1);
+ reference_frame_id = static_cast<uint32>(frame_id - 1);
}
if (rtp_header.is_key_frame && waiting_for_key_) {
- last_released_frame_ = static_cast<uint8>(frame_id - 1);
+ last_released_frame_ = static_cast<uint32>(frame_id - 1);
waiting_for_key_ = false;
}
+ VLOG(1) << "InsertPacket frame:" << frame_id
+ << " packet:" << static_cast<int>(rtp_header.packet_id)
+ << " max packet:" << static_cast<int>(rtp_header.max_packet_id);
+
if (IsOlderFrameId(frame_id, last_released_frame_) && !waiting_for_key_) {
return false;
}
@@ -104,7 +108,7 @@ bool FrameIdMap::InsertPacket(const RtpCastHeader& rtp_header, bool* complete) {
return true;
}
-void FrameIdMap::RemoveOldFrames(uint8 frame_id) {
+void FrameIdMap::RemoveOldFrames(uint32 frame_id) {
FrameMap::iterator it = frame_map_.begin();
while (it != frame_map_.end()) {
@@ -125,11 +129,11 @@ void FrameIdMap::Clear() {
newest_frame_id_ = kStartFrameId;
}
-uint8 FrameIdMap::NewestFrameId() const {
+uint32 FrameIdMap::NewestFrameId() const {
return newest_frame_id_;
}
-bool FrameIdMap::NextContinuousFrame(uint8* frame_id) const {
+bool FrameIdMap::NextContinuousFrame(uint32* frame_id) const {
FrameMap::const_iterator it;
for (it = frame_map_.begin(); it != frame_map_.end(); ++it) {
@@ -141,9 +145,9 @@ bool FrameIdMap::NextContinuousFrame(uint8* frame_id) const {
return false;
}
-uint8 FrameIdMap::LastContinuousFrame() const {
- uint8 last_continuous_frame_id = last_released_frame_;
- uint8 next_expected_frame = last_released_frame_;
+uint32 FrameIdMap::LastContinuousFrame() const {
+ uint32 last_continuous_frame_id = last_released_frame_;
+ uint32 next_expected_frame = last_released_frame_;
FrameMap::const_iterator it;
@@ -159,7 +163,7 @@ uint8 FrameIdMap::LastContinuousFrame() const {
return last_continuous_frame_id;
}
-bool FrameIdMap::NextAudioFrameAllowingMissingFrames(uint8* frame_id) const {
+bool FrameIdMap::NextAudioFrameAllowingMissingFrames(uint32* frame_id) const {
// First check if we have continuous frames.
if (NextContinuousFrame(frame_id)) return true;
@@ -187,7 +191,7 @@ bool FrameIdMap::NextAudioFrameAllowingMissingFrames(uint8* frame_id) const {
return true;
}
-bool FrameIdMap::NextVideoFrameAllowingSkippingFrames(uint8* frame_id) const {
+bool FrameIdMap::NextVideoFrameAllowingSkippingFrames(uint32* frame_id) const {
// Find the oldest decodable frame.
FrameMap::const_iterator it_best_match = frame_map_.end();
FrameMap::const_iterator it;
@@ -217,11 +221,11 @@ int FrameIdMap::NumberOfCompleteFrames() const {
return count;
}
-bool FrameIdMap::FrameExists(uint8 frame_id) const {
+bool FrameIdMap::FrameExists(uint32 frame_id) const {
return frame_map_.end() != frame_map_.find(frame_id);
}
-void FrameIdMap::GetMissingPackets(uint8 frame_id,
+void FrameIdMap::GetMissingPackets(uint32 frame_id,
bool last_frame,
PacketIdSet* missing_packets) const {
FrameMap::const_iterator it = frame_map_.find(frame_id);
@@ -233,7 +237,7 @@ void FrameIdMap::GetMissingPackets(uint8 frame_id,
bool FrameIdMap::ContinuousFrame(FrameInfo* frame) const {
DCHECK(frame);
if (waiting_for_key_ && !frame->is_key_frame()) return false;
- return static_cast<uint8>(last_released_frame_ + 1) == frame->frame_id();
+ return static_cast<uint32>(last_released_frame_ + 1) == frame->frame_id();
}
bool FrameIdMap::DecodableVideoFrame(FrameInfo* frame) const {
diff --git a/chromium/media/cast/framer/frame_id_map.h b/chromium/media/cast/framer/frame_id_map.h
index 6bf72a0d692..40b0a7f3399 100644
--- a/chromium/media/cast/framer/frame_id_map.h
+++ b/chromium/media/cast/framer/frame_id_map.h
@@ -12,33 +12,33 @@
#include "base/memory/scoped_ptr.h"
#include "media/cast/cast_config.h"
#include "media/cast/rtcp/rtcp_defines.h"
-#include "media/cast/rtp_common/rtp_defines.h"
+#include "media/cast/rtp_receiver/rtp_receiver_defines.h"
namespace media {
namespace cast {
class FrameInfo {
public:
- FrameInfo(uint8 frame_id,
- uint8 referenced_frame_id,
+ FrameInfo(uint32 frame_id,
+ uint32 referenced_frame_id,
uint16 max_packet_id,
bool key_frame);
~FrameInfo();
- // Returns true if frame is complete after the insert.
+ // Returns true if packet is inserted.
bool InsertPacket(uint16 packet_id);
bool Complete() const;
void GetMissingPackets(bool newest_frame,
PacketIdSet* missing_packets) const;
bool is_key_frame() const { return is_key_frame_; }
- uint8 frame_id() const { return frame_id_; }
- uint8 referenced_frame_id() const { return referenced_frame_id_; }
+ uint32 frame_id() const { return frame_id_; }
+ uint32 referenced_frame_id() const { return referenced_frame_id_; }
private:
const bool is_key_frame_;
- const uint8 frame_id_;
- const uint8 referenced_frame_id_;
+ const uint32 frame_id_;
+ const uint32 referenced_frame_id_;
uint16 max_received_packet_id_;
PacketIdSet missing_packets_;
@@ -46,7 +46,7 @@ class FrameInfo {
DISALLOW_COPY_AND_ASSIGN(FrameInfo);
};
-typedef std::map<uint8, linked_ptr<FrameInfo> > FrameMap;
+typedef std::map<uint32, linked_ptr<FrameInfo> > FrameMap;
class FrameIdMap {
public:
@@ -57,21 +57,21 @@ class FrameIdMap {
bool InsertPacket(const RtpCastHeader& rtp_header, bool* complete);
bool Empty() const;
- bool FrameExists(uint8 frame_id) const;
- uint8 NewestFrameId() const;
+ bool FrameExists(uint32 frame_id) const;
+ uint32 NewestFrameId() const;
- void RemoveOldFrames(uint8 frame_id);
+ void RemoveOldFrames(uint32 frame_id);
void Clear();
// Identifies the next frame to be released (rendered).
- bool NextContinuousFrame(uint8* frame_id) const;
- uint8 LastContinuousFrame() const;
+ bool NextContinuousFrame(uint32* frame_id) const;
+ uint32 LastContinuousFrame() const;
- bool NextAudioFrameAllowingMissingFrames(uint8* frame_id) const;
- bool NextVideoFrameAllowingSkippingFrames(uint8* frame_id) const;
+ bool NextAudioFrameAllowingMissingFrames(uint32* frame_id) const;
+ bool NextVideoFrameAllowingSkippingFrames(uint32* frame_id) const;
int NumberOfCompleteFrames() const;
- void GetMissingPackets(uint8 frame_id,
+ void GetMissingPackets(uint32 frame_id,
bool last_frame,
PacketIdSet* missing_packets) const;
@@ -81,8 +81,8 @@ class FrameIdMap {
FrameMap frame_map_;
bool waiting_for_key_;
- uint8 last_released_frame_;
- uint8 newest_frame_id_;
+ uint32 last_released_frame_;
+ uint32 newest_frame_id_;
DISALLOW_COPY_AND_ASSIGN(FrameIdMap);
};
diff --git a/chromium/media/cast/framer/framer.cc b/chromium/media/cast/framer/framer.cc
index 95048209dcd..b06e60fd035 100644
--- a/chromium/media/cast/framer/framer.cc
+++ b/chromium/media/cast/framer/framer.cc
@@ -11,13 +11,13 @@ namespace cast {
typedef FrameList::const_iterator ConstFrameIterator;
-Framer::Framer(RtpPayloadFeedback* incoming_payload_feedback,
+Framer::Framer(base::TickClock* clock,
+ RtpPayloadFeedback* incoming_payload_feedback,
uint32 ssrc,
bool decoder_faster_than_max_frame_rate,
int max_unacked_frames)
: decoder_faster_than_max_frame_rate_(decoder_faster_than_max_frame_rate),
- clock_(&default_tick_clock_),
- cast_msg_builder_(new CastMessageBuilder(incoming_payload_feedback,
+ cast_msg_builder_(new CastMessageBuilder(clock, incoming_payload_feedback,
&frame_id_map_, ssrc, decoder_faster_than_max_frame_rate,
max_unacked_frames)) {
DCHECK(incoming_payload_feedback) << "Invalid argument";
@@ -25,11 +25,11 @@ Framer::Framer(RtpPayloadFeedback* incoming_payload_feedback,
Framer::~Framer() {}
-void Framer::InsertPacket(const uint8* payload_data,
- int payload_size,
+bool Framer::InsertPacket(const uint8* payload_data,
+ size_t payload_size,
const RtpCastHeader& rtp_header) {
bool complete = false;
- if (!frame_id_map_.InsertPacket(rtp_header, &complete)) return;
+ if (!frame_id_map_.InsertPacket(rtp_header, &complete)) return false;
// Does this packet belong to a new frame?
FrameList::iterator it = frames_.find(rtp_header.frame_id);
@@ -45,24 +45,23 @@ void Framer::InsertPacket(const uint8* payload_data,
if (complete) {
// ACK as soon as possible.
+ VLOG(1) << "Complete frame " << static_cast<int>(rtp_header.frame_id);
cast_msg_builder_->CompleteFrameReceived(rtp_header.frame_id,
rtp_header.is_key_frame);
}
+ return complete;
}
// This does not release the frame.
-bool Framer::GetEncodedAudioFrame(const base::TimeTicks& timeout,
- EncodedAudioFrame* audio_frame,
+bool Framer::GetEncodedAudioFrame(EncodedAudioFrame* audio_frame,
uint32* rtp_timestamp,
bool* next_frame) {
- uint8 frame_id;
+ uint32 frame_id;
// Find frame id.
if (frame_id_map_.NextContinuousFrame(&frame_id)) {
// We have our next frame.
*next_frame = true;
} else {
- if (WaitForNextFrame(timeout)) return false;
-
if (!frame_id_map_.NextAudioFrameAllowingMissingFrames(&frame_id)) {
return false;
}
@@ -77,18 +76,15 @@ bool Framer::GetEncodedAudioFrame(const base::TimeTicks& timeout,
}
// This does not release the frame.
-bool Framer::GetEncodedVideoFrame(const base::TimeTicks& timeout,
- EncodedVideoFrame* video_frame,
+bool Framer::GetEncodedVideoFrame(EncodedVideoFrame* video_frame,
uint32* rtp_timestamp,
bool* next_frame) {
- uint8 frame_id;
+ uint32 frame_id;
// Find frame id.
if (frame_id_map_.NextContinuousFrame(&frame_id)) {
// We have our next frame.
*next_frame = true;
} else {
- if (WaitForNextFrame(timeout)) return false;
-
// Check if we can skip frames when our decoder is too slow.
if (!decoder_faster_than_max_frame_rate_) return false;
@@ -105,33 +101,30 @@ bool Framer::GetEncodedVideoFrame(const base::TimeTicks& timeout,
return it->second->GetEncodedVideoFrame(video_frame, rtp_timestamp);
}
-bool Framer::WaitForNextFrame(const base::TimeTicks& timeout) const {
- base::TimeDelta wait_time = timeout - clock_->NowTicks();
- if (wait_time.InMilliseconds() > 0)
- return true;
-
- return false;
-}
-
void Framer::Reset() {
frame_id_map_.Clear();
frames_.clear();
cast_msg_builder_->Reset();
}
-void Framer::ReleaseFrame(uint8 frame_id) {
+void Framer::ReleaseFrame(uint32 frame_id) {
frame_id_map_.RemoveOldFrames(frame_id);
frames_.erase(frame_id);
// We have a frame - remove all frames with lower frame id.
+ bool skipped_old_frame = false;
FrameList::iterator it;
for (it = frames_.begin(); it != frames_.end(); ) {
if (IsOlderFrameId(it->first, frame_id)) {
frames_.erase(it++);
+ skipped_old_frame = true;
} else {
++it;
}
}
+ if (skipped_old_frame) {
+ cast_msg_builder_->UpdateCastMessage();
+ }
}
bool Framer::TimeToSendNextCastMessage(base::TimeTicks* time_to_send) {
diff --git a/chromium/media/cast/framer/framer.h b/chromium/media/cast/framer/framer.h
index 93d79060607..cf72da6c35d 100644
--- a/chromium/media/cast/framer/framer.h
+++ b/chromium/media/cast/framer/framer.h
@@ -10,69 +10,58 @@
#include "base/basictypes.h"
#include "base/memory/linked_ptr.h"
#include "base/memory/scoped_ptr.h"
-#include "base/time/default_tick_clock.h"
#include "base/time/tick_clock.h"
#include "base/time/time.h"
#include "media/cast/framer/cast_message_builder.h"
#include "media/cast/framer/frame_buffer.h"
#include "media/cast/framer/frame_id_map.h"
#include "media/cast/rtcp/rtcp.h"
-#include "media/cast/rtp_common/rtp_defines.h"
+#include "media/cast/rtp_receiver/rtp_receiver_defines.h"
namespace media {
namespace cast {
-typedef std::map<uint8, linked_ptr<FrameBuffer> > FrameList;
+typedef std::map<uint32, linked_ptr<FrameBuffer> > FrameList;
class Framer {
public:
- Framer(RtpPayloadFeedback* incoming_payload_feedback,
+ Framer(base::TickClock* clock,
+ RtpPayloadFeedback* incoming_payload_feedback,
uint32 ssrc,
bool decoder_faster_than_max_frame_rate,
int max_unacked_frames);
~Framer();
- void InsertPacket(const uint8* payload_data,
- int payload_size,
+ // Return true when receiving the last packet in a frame, creating a
+ // complete frame.
+ bool InsertPacket(const uint8* payload_data,
+ size_t payload_size,
const RtpCastHeader& rtp_header);
// Extracts a complete encoded frame - will only return a complete continuous
// frame.
// Returns false if the frame does not exist or if the frame is not complete
// within the given time frame.
- bool GetEncodedVideoFrame(const base::TimeTicks& timeout,
- EncodedVideoFrame* video_frame,
+ bool GetEncodedVideoFrame(EncodedVideoFrame* video_frame,
uint32* rtp_timestamp,
bool* next_frame);
- bool GetEncodedAudioFrame(const base::TimeTicks& timeout,
- EncodedAudioFrame* audio_frame,
+ bool GetEncodedAudioFrame(EncodedAudioFrame* audio_frame,
uint32* rtp_timestamp,
bool* next_frame);
- void ReleaseFrame(uint8 frame_id);
+ void ReleaseFrame(uint32 frame_id);
// Reset framer state to original state and flush all pending buffers.
void Reset();
bool TimeToSendNextCastMessage(base::TimeTicks* time_to_send);
void SendCastMessage();
- void set_clock(base::TickClock* clock) {
- clock_ = clock;
- cast_msg_builder_->set_clock(clock);
- }
-
private:
- // Return true if we should wait.
- bool WaitForNextFrame(const base::TimeTicks& timeout) const;
-
const bool decoder_faster_than_max_frame_rate_;
FrameList frames_;
FrameIdMap frame_id_map_;
- base::DefaultTickClock default_tick_clock_;
- base::TickClock* clock_;
-
scoped_ptr<CastMessageBuilder> cast_msg_builder_;
DISALLOW_COPY_AND_ASSIGN(Framer);
diff --git a/chromium/media/cast/framer/framer_unittest.cc b/chromium/media/cast/framer/framer_unittest.cc
index 6f83706494f..871f048af46 100644
--- a/chromium/media/cast/framer/framer_unittest.cc
+++ b/chromium/media/cast/framer/framer_unittest.cc
@@ -4,25 +4,22 @@
#include "base/test/simple_test_tick_clock.h"
#include "media/cast/framer/framer.h"
-#include "media/cast/rtp_common/mock_rtp_payload_feedback.h"
+#include "media/cast/rtp_receiver/mock_rtp_payload_feedback.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
namespace cast {
-static const int64 kFrameTimeMillisecond = 33;
-
class FramerTest : public ::testing::Test {
protected:
FramerTest()
: mock_rtp_payload_feedback_(),
- framer_(&mock_rtp_payload_feedback_, 0, true, 0) {
- framer_.set_clock(&testing_clock_);
+ framer_(&testing_clock_, &mock_rtp_payload_feedback_, 0, true, 0) {
}
- ~FramerTest() {}
+ virtual ~FramerTest() {}
- void SetUp() {
+ virtual void SetUp() OVERRIDE {
// Build a default one packet frame - populate webrtc header.
rtp_header_.is_key_frame = false;
rtp_header_.frame_id = 0;
@@ -46,47 +43,44 @@ class FramerTest : public ::testing::Test {
TEST_F(FramerTest, EmptyState) {
EncodedVideoFrame frame;
- uint32_t rtp_timestamp;
+ uint32 rtp_timestamp;
bool next_frame = false;
- base::TimeTicks timeout;
- EXPECT_FALSE(framer_.GetEncodedVideoFrame(timeout, &frame, &rtp_timestamp,
+ EXPECT_FALSE(framer_.GetEncodedVideoFrame(&frame, &rtp_timestamp,
&next_frame));
}
TEST_F(FramerTest, AlwaysStartWithKey) {
EncodedVideoFrame frame;
- uint32_t rtp_timestamp;
+ uint32 rtp_timestamp;
bool next_frame = false;
- base::TimeTicks timeout;
// Insert non key first frame.
framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
- EXPECT_FALSE(framer_.GetEncodedVideoFrame(timeout, &frame, &rtp_timestamp,
+ EXPECT_FALSE(framer_.GetEncodedVideoFrame(&frame, &rtp_timestamp,
&next_frame));
rtp_header_.frame_id = 1;
rtp_header_.is_key_frame = true;
framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
- EXPECT_TRUE(framer_.GetEncodedVideoFrame(timeout, &frame, &rtp_timestamp,
+ EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &rtp_timestamp,
&next_frame));
EXPECT_TRUE(next_frame);
- EXPECT_EQ(1, frame.frame_id);
+ EXPECT_EQ(1u, frame.frame_id);
EXPECT_TRUE(frame.key_frame);
framer_.ReleaseFrame(frame.frame_id);
}
TEST_F(FramerTest, CompleteFrame) {
EncodedVideoFrame frame;
- uint32_t rtp_timestamp;
+ uint32 rtp_timestamp;
bool next_frame = false;
- base::TimeTicks timeout;
// start with a complete key frame.
rtp_header_.is_key_frame = true;
framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
- EXPECT_TRUE(framer_.GetEncodedVideoFrame(timeout, &frame, &rtp_timestamp,
+ EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &rtp_timestamp,
&next_frame));
EXPECT_TRUE(next_frame);
- EXPECT_EQ(0, frame.frame_id);
+ EXPECT_EQ(0u, frame.frame_id);
EXPECT_TRUE(frame.key_frame);
framer_.ReleaseFrame(frame.frame_id);
@@ -95,30 +89,29 @@ TEST_F(FramerTest, CompleteFrame) {
rtp_header_.is_key_frame = false;
rtp_header_.max_packet_id = 2;
framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
- EXPECT_FALSE(framer_.GetEncodedVideoFrame(timeout, &frame, &rtp_timestamp,
+ EXPECT_FALSE(framer_.GetEncodedVideoFrame(&frame, &rtp_timestamp,
&next_frame));
// Complete delta - can't skip, as incomplete sequence.
++rtp_header_.frame_id;
rtp_header_.max_packet_id = 0;
framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
- EXPECT_FALSE(framer_.GetEncodedVideoFrame(timeout, &frame, &rtp_timestamp,
+ EXPECT_FALSE(framer_.GetEncodedVideoFrame(&frame, &rtp_timestamp,
&next_frame));
}
TEST_F(FramerTest, ContinuousSequence) {
EncodedVideoFrame frame;
- uint32_t rtp_timestamp;
+ uint32 rtp_timestamp;
bool next_frame = false;
- base::TimeTicks timeout;
// start with a complete key frame.
rtp_header_.is_key_frame = true;
framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
- EXPECT_TRUE(framer_.GetEncodedVideoFrame(timeout, &frame, &rtp_timestamp,
+ EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &rtp_timestamp,
&next_frame));
EXPECT_TRUE(next_frame);
- EXPECT_EQ(0, frame.frame_id);
+ EXPECT_EQ(0u, frame.frame_id);
EXPECT_TRUE(frame.key_frame);
framer_.ReleaseFrame(frame.frame_id);
@@ -126,99 +119,89 @@ TEST_F(FramerTest, ContinuousSequence) {
rtp_header_.frame_id = 2;
rtp_header_.is_key_frame = false;
framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
- EXPECT_FALSE(framer_.GetEncodedVideoFrame(timeout, &frame, &rtp_timestamp,
+ EXPECT_FALSE(framer_.GetEncodedVideoFrame(&frame, &rtp_timestamp,
&next_frame));
}
TEST_F(FramerTest, Wrap) {
// Insert key frame, frame_id = 255 (will jump to that)
EncodedVideoFrame frame;
- uint32_t rtp_timestamp;
+ uint32 rtp_timestamp;
bool next_frame = false;
- base::TimeTicks timeout;
// Start with a complete key frame.
rtp_header_.is_key_frame = true;
- rtp_header_.frame_id = 255;
+ rtp_header_.frame_id = 255u;
framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
- EXPECT_TRUE(framer_.GetEncodedVideoFrame(timeout, &frame, &rtp_timestamp,
+ EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &rtp_timestamp,
&next_frame));
EXPECT_TRUE(next_frame);
- EXPECT_EQ(255, frame.frame_id);
+ EXPECT_EQ(255u, frame.frame_id);
framer_.ReleaseFrame(frame.frame_id);
// Insert wrapped delta frame - should be continuous.
rtp_header_.is_key_frame = false;
- rtp_header_.frame_id = 0;
+ rtp_header_.frame_id = 256;
framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
- EXPECT_TRUE(framer_.GetEncodedVideoFrame(timeout, &frame, &rtp_timestamp,
+ EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &rtp_timestamp,
&next_frame));
EXPECT_TRUE(next_frame);
- EXPECT_EQ(0, frame.frame_id);
+ EXPECT_EQ(256u, frame.frame_id);
framer_.ReleaseFrame(frame.frame_id);
}
TEST_F(FramerTest, Reset) {
EncodedVideoFrame frame;
- uint32_t rtp_timestamp;
+ uint32 rtp_timestamp;
bool next_frame = false;
- base::TimeTicks timeout;
// Start with a complete key frame.
rtp_header_.is_key_frame = true;
framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
framer_.Reset();
- EXPECT_FALSE(framer_.GetEncodedVideoFrame(timeout, &frame, &rtp_timestamp,
+ EXPECT_FALSE(framer_.GetEncodedVideoFrame(&frame, &rtp_timestamp,
&next_frame));
}
TEST_F(FramerTest, RequireKeyAfterReset) {
EncodedVideoFrame frame;
- uint32_t rtp_timestamp;
+ uint32 rtp_timestamp;
bool next_frame = false;
- base::TimeTicks timeout;
framer_.Reset();
// Start with a complete key frame.
rtp_header_.is_key_frame = false;
- rtp_header_.frame_id = 0;
+ rtp_header_.frame_id = 0u;
framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
- EXPECT_FALSE(framer_.GetEncodedVideoFrame(timeout, &frame, &rtp_timestamp,
+ EXPECT_FALSE(framer_.GetEncodedVideoFrame(&frame, &rtp_timestamp,
&next_frame));
rtp_header_.frame_id = 1;
rtp_header_.is_key_frame = true;
framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
- EXPECT_TRUE(framer_.GetEncodedVideoFrame(timeout, &frame, &rtp_timestamp,
+ EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &rtp_timestamp,
&next_frame));
EXPECT_TRUE(next_frame);
}
TEST_F(FramerTest, BasicNonLastReferenceId) {
EncodedVideoFrame frame;
- uint32_t rtp_timestamp;
+ uint32 rtp_timestamp;
bool next_frame = false;
rtp_header_.is_key_frame = true;
rtp_header_.frame_id = 0;
framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
- base::TimeTicks timeout;
- EXPECT_TRUE(framer_.GetEncodedVideoFrame(timeout, &frame, &rtp_timestamp,
+ EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &rtp_timestamp,
&next_frame));
framer_.ReleaseFrame(frame.frame_id);
rtp_header_.is_key_frame = false;
rtp_header_.is_reference = true;
rtp_header_.reference_frame_id = 0;
- rtp_header_.frame_id = 5;
+ rtp_header_.frame_id = 5u;
framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
- timeout += base::TimeDelta::FromMilliseconds(kFrameTimeMillisecond);
- EXPECT_FALSE(framer_.GetEncodedVideoFrame(timeout, &frame, &rtp_timestamp,
- &next_frame));
- testing_clock_.Advance(
- base::TimeDelta::FromMilliseconds(kFrameTimeMillisecond));
-
- EXPECT_TRUE(framer_.GetEncodedVideoFrame(timeout, &frame, &rtp_timestamp,
+ EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &rtp_timestamp,
&next_frame));
EXPECT_FALSE(next_frame);
}
@@ -226,9 +209,9 @@ TEST_F(FramerTest, BasicNonLastReferenceId) {
TEST_F(FramerTest, InOrderReferenceFrameSelection) {
// Create pattern: 0, 1, 4, 5.
EncodedVideoFrame frame;
- uint32_t rtp_timestamp;
+ uint32 rtp_timestamp;
bool next_frame = false;
- base::TimeTicks timeout;
+
rtp_header_.is_key_frame = true;
rtp_header_.frame_id = 0;
framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
@@ -245,105 +228,104 @@ TEST_F(FramerTest, InOrderReferenceFrameSelection) {
rtp_header_.is_reference = true;
rtp_header_.reference_frame_id = 0;
framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
- EXPECT_TRUE(framer_.GetEncodedVideoFrame(timeout, &frame, &rtp_timestamp,
+ EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &rtp_timestamp,
&next_frame));
- EXPECT_EQ(0, frame.frame_id);
+ EXPECT_EQ(0u, frame.frame_id);
framer_.ReleaseFrame(frame.frame_id);
- EXPECT_TRUE(framer_.GetEncodedVideoFrame(timeout, &frame, &rtp_timestamp,
+ EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &rtp_timestamp,
&next_frame));
EXPECT_TRUE(next_frame);
- EXPECT_EQ(1, frame.frame_id);
+ EXPECT_EQ(1u, frame.frame_id);
framer_.ReleaseFrame(frame.frame_id);
- EXPECT_TRUE(framer_.GetEncodedVideoFrame(timeout, &frame, &rtp_timestamp,
+ EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &rtp_timestamp,
&next_frame));
EXPECT_FALSE(next_frame);
- EXPECT_EQ(4, frame.frame_id);
+ EXPECT_EQ(4u, frame.frame_id);
framer_.ReleaseFrame(frame.frame_id);
// Insert remaining packet of frame #2 - should no be continuous.
rtp_header_.frame_id = 2;
rtp_header_.packet_id = 1;
framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
- EXPECT_FALSE(framer_.GetEncodedVideoFrame(timeout, &frame, &rtp_timestamp,
+ EXPECT_FALSE(framer_.GetEncodedVideoFrame(&frame, &rtp_timestamp,
&next_frame));
rtp_header_.is_reference = false;
rtp_header_.frame_id = 5;
rtp_header_.packet_id = 0;
rtp_header_.max_packet_id = 0;
framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
- EXPECT_TRUE(framer_.GetEncodedVideoFrame(timeout, &frame, &rtp_timestamp,
+ EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &rtp_timestamp,
&next_frame));
EXPECT_TRUE(next_frame);
- EXPECT_EQ(5, frame.frame_id);
+ EXPECT_EQ(5u, frame.frame_id);
}
TEST_F(FramerTest, AudioWrap) {
// All audio frames are marked as key frames.
EncodedAudioFrame frame;
- uint32_t rtp_timestamp;
- base::TimeTicks timeout;
+ uint32 rtp_timestamp;
bool next_frame = false;
rtp_header_.is_key_frame = true;
rtp_header_.frame_id = 254;
+
framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
- EXPECT_TRUE(framer_.GetEncodedAudioFrame(timeout, &frame, &rtp_timestamp,
+ EXPECT_TRUE(framer_.GetEncodedAudioFrame(&frame, &rtp_timestamp,
&next_frame));
EXPECT_TRUE(next_frame);
- EXPECT_EQ(254, frame.frame_id);
+ EXPECT_EQ(254u, frame.frame_id);
framer_.ReleaseFrame(frame.frame_id);
rtp_header_.frame_id = 255;
framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
// Insert wrapped frame - should be continuous.
- rtp_header_.frame_id = 0;
+ rtp_header_.frame_id = 256;
framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
- EXPECT_TRUE(framer_.GetEncodedAudioFrame(timeout, &frame, &rtp_timestamp,
+ EXPECT_TRUE(framer_.GetEncodedAudioFrame(&frame, &rtp_timestamp,
&next_frame));
EXPECT_TRUE(next_frame);
- EXPECT_EQ(255, frame.frame_id);
+ EXPECT_EQ(255u, frame.frame_id);
framer_.ReleaseFrame(frame.frame_id);
- EXPECT_TRUE(framer_.GetEncodedAudioFrame(timeout, &frame, &rtp_timestamp,
+ EXPECT_TRUE(framer_.GetEncodedAudioFrame(&frame, &rtp_timestamp,
&next_frame));
EXPECT_TRUE(next_frame);
- EXPECT_EQ(0, frame.frame_id);
+ EXPECT_EQ(256u, frame.frame_id);
framer_.ReleaseFrame(frame.frame_id);
}
TEST_F(FramerTest, AudioWrapWithMissingFrame) {
// All audio frames are marked as key frames.
EncodedAudioFrame frame;
- uint32_t rtp_timestamp;
+ uint32 rtp_timestamp;
bool next_frame = false;
- base::TimeTicks timeout;
// Insert and get first packet.
rtp_header_.is_key_frame = true;
rtp_header_.frame_id = 253;
framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
- EXPECT_TRUE(framer_.GetEncodedAudioFrame(timeout, &frame, &rtp_timestamp,
+ EXPECT_TRUE(framer_.GetEncodedAudioFrame(&frame, &rtp_timestamp,
&next_frame));
EXPECT_TRUE(next_frame);
- EXPECT_EQ(253, frame.frame_id);
+ EXPECT_EQ(253u, frame.frame_id);
framer_.ReleaseFrame(frame.frame_id);
// Insert third and fourth packets.
rtp_header_.frame_id = 255;
framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
- rtp_header_.frame_id = 0;
+ rtp_header_.frame_id = 256;
framer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
// Get third and fourth packets.
- EXPECT_TRUE(framer_.GetEncodedAudioFrame(timeout, &frame, &rtp_timestamp,
+ EXPECT_TRUE(framer_.GetEncodedAudioFrame(&frame, &rtp_timestamp,
&next_frame));
EXPECT_FALSE(next_frame);
- EXPECT_EQ(255, frame.frame_id);
+ EXPECT_EQ(255u, frame.frame_id);
framer_.ReleaseFrame(frame.frame_id);
- EXPECT_TRUE(framer_.GetEncodedAudioFrame(timeout, &frame, &rtp_timestamp,
+ EXPECT_TRUE(framer_.GetEncodedAudioFrame(&frame, &rtp_timestamp,
&next_frame));
EXPECT_TRUE(next_frame);
- EXPECT_EQ(0, frame.frame_id);
+ EXPECT_EQ(256u, frame.frame_id);
framer_.ReleaseFrame(frame.frame_id);
}
diff --git a/chromium/media/cast/logging/logging_defines.cc b/chromium/media/cast/logging/logging_defines.cc
new file mode 100644
index 00000000000..85abe7c5d45
--- /dev/null
+++ b/chromium/media/cast/logging/logging_defines.cc
@@ -0,0 +1,101 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/logging/logging_defines.h"
+
+#include "base/logging.h"
+
+namespace media {
+namespace cast {
+
+CastLoggingConfig::CastLoggingConfig()
+ : enable_data_collection(false),
+ enable_uma_stats(false),
+ enable_tracing(false) {}
+
+CastLoggingConfig::~CastLoggingConfig() {}
+
+CastLoggingConfig GetDefaultCastLoggingConfig() {
+ CastLoggingConfig config;
+ return config;
+}
+
+std::string CastLoggingToString(CastLoggingEvent event) {
+ switch (event) {
+ case(kUnknown):
+ // Can happen if the sender and receiver of RTCP log messages are not
+ // aligned.
+ return "Unknown";
+ case(kRttMs):
+ return "RttMs";
+ case(kPacketLoss):
+ return "PacketLoss";
+ case(kJitterMs):
+ return "JitterMs";
+ case(kAckReceived):
+ return "AckReceived";
+ case(kRembBitrate):
+ return "RembBitrate";
+ case(kAckSent):
+ return "AckSent";
+ case(kLastEvent):
+ return "LastEvent";
+ case(kAudioFrameReceived):
+ return "AudioFrameReceived";
+ case(kAudioFrameCaptured):
+ return "AudioFrameCaptured";
+ case(kAudioFrameEncoded):
+ return "AudioFrameEncoded";
+ case(kAudioPlayoutDelay):
+ return "AudioPlayoutDelay";
+ case(kAudioFrameDecoded):
+ return "AudioFrameDecoded";
+ case(kVideoFrameCaptured):
+ return "VideoFrameCaptured";
+ case(kVideoFrameReceived):
+ return "VideoFrameReceived";
+ case(kVideoFrameSentToEncoder):
+ return "VideoFrameSentToEncoder";
+ case(kVideoFrameEncoded):
+ return "VideoFrameEncoded";
+ case(kVideoFrameDecoded):
+ return "VideoFrameDecoded";
+ case(kVideoRenderDelay):
+ return "VideoRenderDelay";
+ case(kPacketSentToPacer):
+ return "PacketSentToPacer";
+ case(kPacketSentToNetwork):
+ return "PacketSentToNetwork";
+ case(kPacketRetransmited):
+ return "PacketRetransmited";
+ case(kPacketReceived):
+ return "PacketReceived";
+ default:
+ NOTREACHED();
+ return "";
+ }
+}
+
+FrameEvent::FrameEvent() {}
+FrameEvent::~FrameEvent() {}
+
+BasePacketInfo::BasePacketInfo() {}
+BasePacketInfo::~BasePacketInfo() {}
+
+PacketEvent::PacketEvent() {}
+PacketEvent::~PacketEvent() {}
+
+GenericEvent::GenericEvent() {}
+GenericEvent::~GenericEvent() {}
+
+FrameLogStats::FrameLogStats()
+ : framerate_fps(0),
+ bitrate_kbps(0),
+ max_delay_ms(0),
+ min_delay_ms(0),
+ avg_delay_ms(0) {}
+FrameLogStats::~FrameLogStats() {}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/logging/logging_defines.h b/chromium/media/cast/logging/logging_defines.h
new file mode 100644
index 00000000000..5a7bca1500f
--- /dev/null
+++ b/chromium/media/cast/logging/logging_defines.h
@@ -0,0 +1,130 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_LOGGING_LOGGING_DEFINES_H_
+#define MEDIA_CAST_LOGGING_LOGGING_DEFINES_H_
+
+#include <map>
+#include <string>
+#include <vector>
+
+#include "base/memory/linked_ptr.h"
+#include "base/time/time.h"
+
+namespace media {
+namespace cast {
+
+static const uint32 kFrameIdUnknown = 0xFFFF;
+
+struct CastLoggingConfig {
+ CastLoggingConfig();
+ ~CastLoggingConfig();
+
+ bool enable_data_collection;
+ bool enable_uma_stats;
+ bool enable_tracing;
+};
+
+// Returns the default logging config: data collection, UMA stats, and tracing all disabled.
+CastLoggingConfig GetDefaultCastLoggingConfig();
+
+enum CastLoggingEvent {
+ // Generic events.
+ kUnknown,
+ kRttMs,
+ kPacketLoss,
+ kJitterMs,
+ kAckReceived,
+ kRembBitrate,
+ kAckSent,
+ kLastEvent,
+ // Audio sender.
+ kAudioFrameReceived,
+ kAudioFrameCaptured,
+ kAudioFrameEncoded,
+ // Audio receiver.
+ kAudioPlayoutDelay,
+ kAudioFrameDecoded,
+ // Video sender.
+ kVideoFrameCaptured,
+ kVideoFrameReceived,
+ kVideoFrameSentToEncoder,
+ kVideoFrameEncoded,
+ // Video receiver.
+ kVideoFrameDecoded,
+ kVideoRenderDelay,
+ // Send-side packet events.
+ kPacketSentToPacer,
+ kPacketSentToNetwork,
+ kPacketRetransmited,
+ // Receive-side packet events.
+ kPacketReceived,
+
+ kNumOfLoggingEvents,
+};
+
+std::string CastLoggingToString(CastLoggingEvent event);
+
+struct FrameEvent {
+ FrameEvent();
+ ~FrameEvent();
+
+ uint32 frame_id;
+ size_t size; // Encoded size only.
+ std::vector<base::TimeTicks> timestamp;
+ std::vector<CastLoggingEvent> type;
+ base::TimeDelta delay_delta; // Render/playout delay.
+};
+
+// Internal map sorted by packet id.
+struct BasePacketInfo {
+ BasePacketInfo();
+ ~BasePacketInfo();
+
+ size_t size;
+ std::vector<base::TimeTicks> timestamp;
+ std::vector<CastLoggingEvent> type;
+};
+
+typedef std::map<uint16, BasePacketInfo> BasePacketMap;
+
+struct PacketEvent {
+ PacketEvent();
+ ~PacketEvent();
+ uint32 frame_id;
+ int max_packet_id;
+ BasePacketMap packet_map;
+};
+
+struct GenericEvent {
+ GenericEvent();
+ ~GenericEvent();
+ std::vector<int> value;
+ std::vector<base::TimeTicks> timestamp;
+};
+
+struct FrameLogStats {
+ FrameLogStats();
+ ~FrameLogStats();
+
+ double framerate_fps;
+ double bitrate_kbps;
+ int max_delay_ms;
+ int min_delay_ms;
+ int avg_delay_ms;
+};
+
+// Store all log types in a map based on the event.
+typedef std::map<uint32, FrameEvent> FrameRawMap;
+typedef std::map<uint32, PacketEvent> PacketRawMap;
+typedef std::map<CastLoggingEvent, GenericEvent> GenericRawMap;
+
+typedef std::map<CastLoggingEvent, linked_ptr<FrameLogStats > > FrameStatsMap;
+typedef std::map<CastLoggingEvent, double> PacketStatsMap;
+typedef std::map<CastLoggingEvent, double> GenericStatsMap;
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_LOGGING_LOGGING_DEFINES_H_
diff --git a/chromium/media/cast/logging/logging_impl.cc b/chromium/media/cast/logging/logging_impl.cc
new file mode 100644
index 00000000000..ea96b94b610
--- /dev/null
+++ b/chromium/media/cast/logging/logging_impl.cc
@@ -0,0 +1,225 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/debug/trace_event.h"
+#include "base/metrics/histogram.h"
+#include "media/cast/logging/logging_impl.h"
+#include "net/base/big_endian.h"
+
+namespace media {
+namespace cast {
+
+LoggingImpl::LoggingImpl(base::TickClock* clock,
+ scoped_refptr<base::TaskRunner> main_thread_proxy,
+ const CastLoggingConfig& config)
+ : main_thread_proxy_(main_thread_proxy),
+ config_(config),
+ raw_(clock),
+ stats_(clock) {}
+
+LoggingImpl::~LoggingImpl() {}
+
+void LoggingImpl::InsertFrameEvent(CastLoggingEvent event,
+ uint32 rtp_timestamp,
+ uint32 frame_id) {
+ DCHECK(main_thread_proxy_->RunsTasksOnCurrentThread());
+ if (config_.enable_data_collection) {
+ raw_.InsertFrameEvent(event, rtp_timestamp, frame_id);
+ stats_.InsertFrameEvent(event, rtp_timestamp, frame_id);
+ }
+ if (config_.enable_tracing) {
+ std::string event_string = CastLoggingToString(event);
+ TRACE_EVENT_INSTANT2(event_string.c_str(), "FE",
+ TRACE_EVENT_SCOPE_THREAD, "rtp_timestamp", rtp_timestamp, "frame_id",
+ frame_id);
+ }
+}
+
+void LoggingImpl::InsertFrameEventWithSize(CastLoggingEvent event,
+ uint32 rtp_timestamp,
+ uint32 frame_id,
+ int frame_size) {
+ DCHECK(main_thread_proxy_->RunsTasksOnCurrentThread());
+ if (config_.enable_data_collection) {
+ raw_.InsertFrameEventWithSize(event, rtp_timestamp, frame_id, frame_size);
+ stats_.InsertFrameEventWithSize(event, rtp_timestamp, frame_id, frame_size);
+ }
+ if (config_.enable_uma_stats) {
+ UMA_HISTOGRAM_COUNTS(CastLoggingToString(event), frame_size);
+ }
+ if (config_.enable_tracing) {
+ std::string event_string = CastLoggingToString(event);
+ TRACE_EVENT_INSTANT2(event_string.c_str(), "FES",
+ TRACE_EVENT_SCOPE_THREAD, "rtp_timestamp", rtp_timestamp, "frame_size",
+ frame_size);
+ }
+}
+
+void LoggingImpl::InsertFrameEventWithDelay(CastLoggingEvent event,
+ uint32 rtp_timestamp,
+ uint32 frame_id,
+ base::TimeDelta delay) {
+ DCHECK(main_thread_proxy_->RunsTasksOnCurrentThread());
+ if (config_.enable_data_collection) {
+ raw_.InsertFrameEventWithDelay(event, rtp_timestamp, frame_id, delay);
+ stats_.InsertFrameEventWithDelay(event, rtp_timestamp, frame_id, delay);
+ }
+ if (config_.enable_uma_stats) {
+ UMA_HISTOGRAM_TIMES(CastLoggingToString(event), delay);
+ }
+ if (config_.enable_tracing) {
+ std::string event_string = CastLoggingToString(event);
+ TRACE_EVENT_INSTANT2(event_string.c_str(), "FED",
+ TRACE_EVENT_SCOPE_THREAD, "rtp_timestamp", rtp_timestamp, "delay",
+ delay.InMilliseconds());
+ }
+}
+
+void LoggingImpl::InsertPacketListEvent(CastLoggingEvent event,
+ const PacketList& packets) {
+ DCHECK(main_thread_proxy_->RunsTasksOnCurrentThread());
+ for (unsigned int i = 0; i < packets.size(); ++i) {
+ const Packet& packet = packets[i];
+ // Parse basic properties.
+ uint32 rtp_timestamp;
+ uint16 packet_id, max_packet_id;
+ const uint8* packet_data = &packet[0];
+ net::BigEndianReader big_endian_reader(packet_data + 4, 4);
+ big_endian_reader.ReadU32(&rtp_timestamp);
+ net::BigEndianReader cast_big_endian_reader(packet_data + 12 + 2, 4);
+ cast_big_endian_reader.ReadU16(&packet_id);
+ cast_big_endian_reader.ReadU16(&max_packet_id);
+ // rtp_timestamp is enough - no need for frame_id as well.
+ InsertPacketEvent(event, rtp_timestamp, kFrameIdUnknown, packet_id,
+ max_packet_id, packet.size());
+ }
+}
+
+void LoggingImpl::InsertPacketEvent(CastLoggingEvent event,
+ uint32 rtp_timestamp,
+ uint32 frame_id,
+ uint16 packet_id,
+ uint16 max_packet_id,
+ size_t size) {
+ DCHECK(main_thread_proxy_->RunsTasksOnCurrentThread());
+ if (config_.enable_data_collection) {
+ raw_.InsertPacketEvent(event, rtp_timestamp, frame_id, packet_id,
+ max_packet_id, size);
+ stats_.InsertPacketEvent(event, rtp_timestamp, frame_id, packet_id,
+ max_packet_id, size);
+ }
+ if (config_.enable_tracing) {
+ std::string event_string = CastLoggingToString(event);
+ TRACE_EVENT_INSTANT2(event_string.c_str(), "PE",
+ TRACE_EVENT_SCOPE_THREAD, "rtp_timestamp", rtp_timestamp,
+ "packet_id", packet_id);
+ }
+}
+
+void LoggingImpl::InsertGenericEvent(CastLoggingEvent event, int value) {
+ DCHECK(main_thread_proxy_->RunsTasksOnCurrentThread());
+ if (config_.enable_data_collection) {
+ raw_.InsertGenericEvent(event, value);
+ stats_.InsertGenericEvent(event, value);
+ }
+ if (config_.enable_uma_stats) {
+ UMA_HISTOGRAM_COUNTS(CastLoggingToString(event), value);
+ }
+ if (config_.enable_tracing) {
+ std::string event_string = CastLoggingToString(event);
+ TRACE_EVENT_INSTANT1(event_string.c_str(), "GE",
+ TRACE_EVENT_SCOPE_THREAD, "value", value);
+ }
+}
+
+// TODO: expose the entire raw logging object instead of per-type getters; would be simpler.
+FrameRawMap LoggingImpl::GetFrameRawData() {
+ DCHECK(main_thread_proxy_->RunsTasksOnCurrentThread());
+ return raw_.GetFrameData();
+}
+
+PacketRawMap LoggingImpl::GetPacketRawData() {
+ DCHECK(main_thread_proxy_->RunsTasksOnCurrentThread());
+ return raw_.GetPacketData();
+}
+
+GenericRawMap LoggingImpl::GetGenericRawData() {
+ DCHECK(main_thread_proxy_->RunsTasksOnCurrentThread());
+ return raw_.GetGenericData();
+}
+
+const FrameStatsMap* LoggingImpl::GetFrameStatsData() {
+ DCHECK(main_thread_proxy_->RunsTasksOnCurrentThread());
+ // Get stats data.
+ const FrameStatsMap* stats = stats_.GetFrameStatsData();
+ if (config_.enable_uma_stats) {
+ FrameStatsMap::const_iterator it;
+ for (it = stats->begin(); it != stats->end(); ++it) {
+ // Check for an active event.
+ if (it->second->framerate_fps > 0) {
+ std::string event_string = CastLoggingToString(it->first);
+ UMA_HISTOGRAM_COUNTS(event_string.append("_framerate_fps"),
+ it->second->framerate_fps);
+ } else {
+ // All active frame events trigger framerate computation.
+ continue;
+ }
+ if (it->second->bitrate_kbps > 0) {
+ std::string evnt_string = CastLoggingToString(it->first);
+ UMA_HISTOGRAM_COUNTS(evnt_string.append("_bitrate_kbps"),
+ it->second->framerate_fps);
+ }
+ if (it->second->avg_delay_ms > 0) {
+ std::string event_string = CastLoggingToString(it->first);
+ UMA_HISTOGRAM_COUNTS(event_string.append("_avg_delay_ms"),
+ it->second->avg_delay_ms);
+ UMA_HISTOGRAM_COUNTS(event_string.append("_min_delay_ms"),
+ it->second->min_delay_ms);
+ UMA_HISTOGRAM_COUNTS(event_string.append("_max_delay_ms"),
+ it->second->max_delay_ms);
+ }
+ }
+ }
+ return stats;
+}
+
+const PacketStatsMap* LoggingImpl::GetPacketStatsData() {
+ DCHECK(main_thread_proxy_->RunsTasksOnCurrentThread());
+ // Get stats data.
+ const PacketStatsMap* stats = stats_.GetPacketStatsData();
+ if (config_.enable_uma_stats) {
+ PacketStatsMap::const_iterator it;
+ for (it = stats->begin(); it != stats->end(); ++it) {
+ if (it->second > 0) {
+ std::string event_string = CastLoggingToString(it->first);
+ UMA_HISTOGRAM_COUNTS(event_string.append("_bitrate_kbps"), it->second);
+ }
+ }
+ }
+ return stats;
+}
+
+const GenericStatsMap* LoggingImpl::GetGenericStatsData() {
+ DCHECK(main_thread_proxy_->RunsTasksOnCurrentThread());
+ // Get stats data.
+ const GenericStatsMap* stats = stats_.GetGenericStatsData();
+ if (config_.enable_uma_stats) {
+ GenericStatsMap::const_iterator it;
+ for (it = stats->begin(); it != stats->end(); ++it) {
+ if (it->second > 0) {
+ UMA_HISTOGRAM_COUNTS(CastLoggingToString(it->first), it->second);
+ }
+ }
+ }
+ return stats;
+}
+
+void LoggingImpl::Reset() {
+ DCHECK(main_thread_proxy_->RunsTasksOnCurrentThread());
+ raw_.Reset();
+ stats_.Reset();
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/logging/logging_impl.h b/chromium/media/cast/logging/logging_impl.h
new file mode 100644
index 00000000000..34021b7d03c
--- /dev/null
+++ b/chromium/media/cast/logging/logging_impl.h
@@ -0,0 +1,78 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef MEDIA_CAST_LOGGING_LOGGING_IMPL_H_
+#define MEDIA_CAST_LOGGING_LOGGING_IMPL_H_
+
+// Generic class that handles event logging for the cast library.
+// Logging has three possible optional forms:
+// 1. Raw data and stats accessible by the application.
+// 2. UMA stats.
+// 3. Tracing of raw events.
+
+#include "base/memory/ref_counted.h"
+#include "base/task_runner.h"
+#include "media/cast/cast_config.h"
+#include "media/cast/logging/logging_defines.h"
+#include "media/cast/logging/logging_raw.h"
+#include "media/cast/logging/logging_stats.h"
+
+namespace media {
+namespace cast {
+
+// Should only be called from the main thread.
+class LoggingImpl : public base::NonThreadSafe {
+ public:
+ LoggingImpl(base::TickClock* clock,
+ scoped_refptr<base::TaskRunner> main_thread_proxy,
+ const CastLoggingConfig& config);
+
+ ~LoggingImpl();
+
+ // TODO(pwestin): Add argument to API to send in time of event instead of
+ // grabbing now.
+ void InsertFrameEvent(CastLoggingEvent event,
+ uint32 rtp_timestamp,
+ uint32 frame_id);
+ void InsertFrameEventWithSize(CastLoggingEvent event,
+ uint32 rtp_timestamp,
+ uint32 frame_id,
+ int frame_size);
+ void InsertFrameEventWithDelay(CastLoggingEvent event,
+ uint32 rtp_timestamp,
+ uint32 frame_id,
+ base::TimeDelta delay);
+ void InsertPacketListEvent(CastLoggingEvent event, const PacketList& packets);
+
+ void InsertPacketEvent(CastLoggingEvent event,
+ uint32 rtp_timestamp,
+ uint32 frame_id,
+ uint16 packet_id,
+ uint16 max_packet_id,
+ size_t size);
+ void InsertGenericEvent(CastLoggingEvent event, int value);
+
+ // Get raw data.
+ FrameRawMap GetFrameRawData();
+ PacketRawMap GetPacketRawData();
+ GenericRawMap GetGenericRawData();
+ // Get stats only (computed when called). Triggers UMA stats when enabled.
+ const FrameStatsMap* GetFrameStatsData();
+ const PacketStatsMap* GetPacketStatsData();
+ const GenericStatsMap* GetGenericStatsData();
+
+ void Reset();
+
+ private:
+ scoped_refptr<base::TaskRunner> main_thread_proxy_;
+ const CastLoggingConfig config_;
+ LoggingRaw raw_;
+ LoggingStats stats_;
+
+ DISALLOW_COPY_AND_ASSIGN(LoggingImpl);
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_LOGGING_LOGGING_IMPL_H_
diff --git a/chromium/media/cast/logging/logging_internal.cc b/chromium/media/cast/logging/logging_internal.cc
new file mode 100644
index 00000000000..ce2249ee4e0
--- /dev/null
+++ b/chromium/media/cast/logging/logging_internal.cc
@@ -0,0 +1,79 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/logging/logging_internal.h"
+
+namespace media {
+namespace cast {
+
+FrameLogData::FrameLogData(base::TickClock* clock)
+ : clock_(clock),
+ frame_map_() {}
+
+FrameLogData::~FrameLogData() {}
+
+void FrameLogData::Insert(uint32 rtp_timestamp, uint32 frame_id) {
+ FrameEvent info;
+ InsertBase(rtp_timestamp, frame_id, info);
+}
+
+void FrameLogData::InsertWithSize(
+ uint32 rtp_timestamp, uint32 frame_id, int size) {
+ FrameEvent info;
+ info.size = size;
+ InsertBase(rtp_timestamp, frame_id, info);
+}
+
+void FrameLogData::InsertWithDelay(
+ uint32 rtp_timestamp, uint32 frame_id, base::TimeDelta delay) {
+ FrameEvent info;
+ info.delay_delta = delay;
+ InsertBase(rtp_timestamp, frame_id, info);
+}
+
+void FrameLogData::InsertBase(
+ uint32 rtp_timestamp, uint32 frame_id, FrameEvent info) {
+ info.timestamp = clock_->NowTicks();
+ info.frame_id = frame_id;
+ frame_map_.insert(std::make_pair(rtp_timestamp, info));
+}
+
+PacketLogData::PacketLogData(base::TickClock* clock)
+ : clock_(clock),
+ packet_map_() {}
+
+PacketLogData::~PacketLogData() {}
+
+void PacketLogData::Insert(uint32 rtp_timestamp,
+ uint32 frame_id, uint16 packet_id, uint16 max_packet_id, int size) {
+ PacketEvent info;
+ info.size = size;
+ info.max_packet_id = max_packet_id;
+ info.frame_id = frame_id;
+ info.timestamp = clock_->NowTicks();
+ // Is this a new frame?
+ PacketMap::iterator it = packet_map_.find(rtp_timestamp);
+ if (it == packet_map_.end()) {
+ // New rtp_timestamp id - create base packet map.
+ BasePacketMap base_map;
+ base_map.insert(std::make_pair(packet_id, info));
+ packet_map_.insert(std::make_pair(rtp_timestamp, base_map));
+ } else {
+ // Existing rtp_timestamp.
+ it->second.insert(std::make_pair(packet_id, info));
+ }
+}
+
+GenericLogData::GenericLogData(base::TickClock* clock)
+ : clock_(clock) {}
+
+GenericLogData::~GenericLogData() {}
+
+void GenericLogData::Insert(int data) {
+ data_.push_back(data);
+ timestamp_.push_back(clock_->NowTicks());
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/logging/logging_internal.h b/chromium/media/cast/logging/logging_internal.h
new file mode 100644
index 00000000000..6f028b925fe
--- /dev/null
+++ b/chromium/media/cast/logging/logging_internal.h
@@ -0,0 +1,95 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_LOGGING_LOGGING_INTERNAL_H_
+#define MEDIA_CAST_LOGGING_LOGGING_INTERNAL_H_
+
+#include <map>
+#include <string>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/time/tick_clock.h"
+#include "base/time/time.h"
+
+namespace media {
+namespace cast {
+
+// TODO(mikhal): Consider storing only the delta time and not absolute time.
+struct FrameEvent {
+ uint32 frame_id;
+ int size;
+ base::TimeTicks timestamp;
+ base::TimeDelta delay_delta; // render/playout delay.
+};
+
+struct PacketEvent {
+ uint32 frame_id;
+ int max_packet_id;
+ size_t size;
+ base::TimeTicks timestamp;
+};
+
+// Frame and packet maps are sorted based on the rtp_timestamp.
+typedef std::map<uint32, FrameEvent> FrameMap;
+typedef std::map<uint16, PacketEvent> BasePacketMap;
+typedef std::map<uint32, BasePacketMap> PacketMap;
+
+class FrameLogData {
+ public:
+ explicit FrameLogData(base::TickClock* clock);
+ ~FrameLogData();
+ void Insert(uint32 rtp_timestamp, uint32 frame_id);
+ // Include size for encoded images (compute bitrate),
+ void InsertWithSize(uint32 rtp_timestamp, uint32 frame_id, int size);
+ // Include playout/render delay info.
+ void InsertWithDelay(
+ uint32 rtp_timestamp, uint32 frame_id, base::TimeDelta delay);
+ void Reset();
+
+ private:
+ void InsertBase(uint32 rtp_timestamp, uint32 frame_id, FrameEvent info);
+
+ base::TickClock* const clock_; // Not owned by this class.
+ FrameMap frame_map_;
+
+ DISALLOW_COPY_AND_ASSIGN(FrameLogData);
+};
+
+// TODO(mikhal): Should be able to handle packet bursts.
+class PacketLogData {
+ public:
+ explicit PacketLogData(base::TickClock* clock);
+ ~PacketLogData();
+ void Insert(uint32 rtp_timestamp, uint32 frame_id, uint16 packet_id,
+ uint16 max_packet_id, int size);
+ void Reset();
+
+ private:
+ base::TickClock* const clock_; // Not owned by this class.
+ PacketMap packet_map_;
+
+ DISALLOW_COPY_AND_ASSIGN(PacketLogData);
+};
+
+class GenericLogData {
+ public:
+ explicit GenericLogData(base::TickClock* clock);
+ ~GenericLogData();
+ void Insert(int value);
+ void Reset();
+
+ private:
+ base::TickClock* const clock_; // Not owned by this class.
+ std::vector<int> data_;
+ std::vector<base::TimeTicks> timestamp_;
+
+ DISALLOW_COPY_AND_ASSIGN(GenericLogData);
+};
+
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_LOGGING_LOGGING_INTERNAL_H_
diff --git a/chromium/media/cast/logging/logging_raw.cc b/chromium/media/cast/logging/logging_raw.cc
new file mode 100644
index 00000000000..6a389617f62
--- /dev/null
+++ b/chromium/media/cast/logging/logging_raw.cc
@@ -0,0 +1,143 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/logging/logging_raw.h"
+
+#include "base/logging.h"
+#include "base/metrics/histogram.h"
+#include "base/time/time.h"
+
+namespace media {
+namespace cast {
+
+// |clock| is not owned and must outlive this object.
+LoggingRaw::LoggingRaw(base::TickClock* clock)
+    : clock_(clock),
+      frame_map_(),
+      packet_map_(),
+      generic_map_(),
+      weak_factory_(this) {}
+
+LoggingRaw::~LoggingRaw() {}
+
+// Records a frame event that carries no extra payload.
+void LoggingRaw::InsertFrameEvent(CastLoggingEvent event,
+                                  uint32 rtp_timestamp,
+                                  uint32 frame_id) {
+  InsertBaseFrameEvent(event, frame_id, rtp_timestamp);
+}
+
+// Records a frame event and attaches |size| (bytes) to the frame's entry;
+// used for encoded frames so a bitrate can be derived later.
+void LoggingRaw::InsertFrameEventWithSize(CastLoggingEvent event,
+                                          uint32 rtp_timestamp,
+                                          uint32 frame_id,
+                                          int size) {
+  InsertBaseFrameEvent(event, frame_id, rtp_timestamp);
+  // Now insert size. The entry must exist: InsertBaseFrameEvent just
+  // created it if it was missing.
+  FrameRawMap::iterator it = frame_map_.find(rtp_timestamp);
+  DCHECK(it != frame_map_.end());
+  it->second.size = size;
+}
+
+// Records a frame event and attaches the playout/render |delay| to the
+// frame's entry.
+void LoggingRaw::InsertFrameEventWithDelay(CastLoggingEvent event,
+                                           uint32 rtp_timestamp,
+                                           uint32 frame_id,
+                                           base::TimeDelta delay) {
+  InsertBaseFrameEvent(event, frame_id, rtp_timestamp);
+  // Now insert delay. The entry must exist: InsertBaseFrameEvent just
+  // created it if it was missing.
+  FrameRawMap::iterator it = frame_map_.find(rtp_timestamp);
+  DCHECK(it != frame_map_.end());
+  it->second.delay_delta = delay;
+}
+
+// Shared helper: creates or appends to the raw entry keyed by
+// |rtp_timestamp|, recording the event type and current tick-clock time.
+// NOTE: parameter order is (frame_id, rtp_timestamp) - the reverse of the
+// public Insert* methods.
+void LoggingRaw::InsertBaseFrameEvent(CastLoggingEvent event,
+                                      uint32 frame_id,
+                                      uint32 rtp_timestamp) {
+  // Is this a new event?
+  FrameRawMap::iterator it = frame_map_.find(rtp_timestamp);
+  if (it == frame_map_.end()) {
+    // Create a new map entry.
+    FrameEvent info;
+    info.frame_id = frame_id;
+    info.timestamp.push_back(clock_->NowTicks());
+    info.type.push_back(event);
+    frame_map_.insert(std::make_pair(rtp_timestamp, info));
+  } else {
+    // Insert to an existing entry.
+    it->second.timestamp.push_back(clock_->NowTicks());
+    it->second.type.push_back(event);
+    // Do we have a valid frame_id?
+    // Not all events have a valid frame id.
+    if (it->second.frame_id == kFrameIdUnknown && frame_id != kFrameIdUnknown)
+      it->second.frame_id = frame_id;
+  }
+}
+
+// Records |event| for packet |packet_id| of the frame with |rtp_timestamp|,
+// creating the frame entry and/or the per-packet entry on first sight.
+void LoggingRaw::InsertPacketEvent(CastLoggingEvent event,
+                                   uint32 rtp_timestamp,
+                                   uint32 frame_id,
+                                   uint16 packet_id,
+                                   uint16 max_packet_id,
+                                   size_t size) {
+  // Is this packet belonging to a new frame?
+  PacketRawMap::iterator it = packet_map_.find(rtp_timestamp);
+  if (it == packet_map_.end()) {
+    // Create a new entry - start with base packet map.
+    PacketEvent info;
+    info.frame_id = frame_id;
+    info.max_packet_id = max_packet_id;
+    BasePacketInfo base_info;
+    base_info.size = size;
+    base_info.timestamp.push_back(clock_->NowTicks());
+    base_info.type.push_back(event);
+    // Bug fix: |base_info| must be stored in the frame's packet map before
+    // the frame entry is inserted; previously it was built and then dropped,
+    // losing the first packet event of every frame.
+    info.packet_map.insert(std::make_pair(packet_id, base_info));
+    packet_map_.insert(std::make_pair(rtp_timestamp, info));
+  } else {
+    // Is this a new packet?
+    BasePacketMap::iterator packet_it = it->second.packet_map.find(packet_id);
+    if (packet_it == it->second.packet_map.end()) {
+      BasePacketInfo base_info;
+      base_info.size = size;
+      base_info.timestamp.push_back(clock_->NowTicks());
+      base_info.type.push_back(event);
+      it->second.packet_map.insert(std::make_pair(packet_id, base_info));
+    } else {
+      // Existing packet: append this event's time and type.
+      packet_it->second.timestamp.push_back(clock_->NowTicks());
+      packet_it->second.type.push_back(event);
+    }
+  }
+}
+
+// Appends |value| (plus a timestamp) to the raw list for |event|.
+void LoggingRaw::InsertGenericEvent(CastLoggingEvent event, int value) {
+  // |event_data| is only consumed when this is the first value for |event|;
+  // for existing events the entry is appended to directly below.
+  GenericEvent event_data;
+  event_data.value.push_back(value);
+  event_data.timestamp.push_back(clock_->NowTicks());
+  // Is this a new event?
+  GenericRawMap::iterator it = generic_map_.find(event);
+  if (it == generic_map_.end()) {
+    // Create new entry.
+    generic_map_.insert(std::make_pair(event, event_data));
+  } else {
+    // Insert to existing entry.
+    it->second.value.push_back(value);
+    it->second.timestamp.push_back(clock_->NowTicks());
+  }
+}
+
+// Returns a copy of the accumulated raw frame data.
+FrameRawMap LoggingRaw::GetFrameData() const {
+  return frame_map_;
+}
+
+// Returns a copy of the accumulated raw packet data.
+PacketRawMap LoggingRaw::GetPacketData() const {
+  return packet_map_;
+}
+
+// Returns a copy of the accumulated raw generic-event data.
+GenericRawMap LoggingRaw::GetGenericData() const {
+  return generic_map_;
+}
+
+// Drops all accumulated raw data of every kind.
+void LoggingRaw::Reset() {
+  frame_map_.clear();
+  packet_map_.clear();
+  generic_map_.clear();
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/logging/logging_raw.h b/chromium/media/cast/logging/logging_raw.h
new file mode 100644
index 00000000000..4ac8d0fb7ad
--- /dev/null
+++ b/chromium/media/cast/logging/logging_raw.h
@@ -0,0 +1,85 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_LOGGING_LOGGING_RAW_H_
+#define MEDIA_CAST_LOGGING_LOGGING_RAW_H_
+
+#include <map>
+#include <string>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/memory/linked_ptr.h"
+#include "base/memory/weak_ptr.h"
+#include "base/threading/non_thread_safe.h"
+#include "base/time/tick_clock.h"
+#include "media/cast/logging/logging_defines.h"
+
+namespace media {
+namespace cast {
+
+// Collects raw (per-frame / per-packet / per-value) cast logging events.
+// This class is not thread safe, and should only be called from the main
+// thread.
+class LoggingRaw : public base::NonThreadSafe,
+                   public base::SupportsWeakPtr<LoggingRaw> {
+ public:
+  // |clock| is not owned and must outlive this object.
+  explicit LoggingRaw(base::TickClock* clock);
+  ~LoggingRaw();
+
+  // Inform of new event: three types of events: frame, packets and generic.
+  // Frame events can be inserted with different parameters.
+  void InsertFrameEvent(CastLoggingEvent event,
+                        uint32 rtp_timestamp,
+                        uint32 frame_id);
+
+  // Size - Inserting the size implies that this is an encoded frame.
+  void InsertFrameEventWithSize(CastLoggingEvent event,
+                                uint32 rtp_timestamp,
+                                uint32 frame_id,
+                                int frame_size);
+
+  // Render/playout delay
+  void InsertFrameEventWithDelay(CastLoggingEvent event,
+                                 uint32 rtp_timestamp,
+                                 uint32 frame_id,
+                                 base::TimeDelta delay);
+
+  // Insert a packet event.
+  void InsertPacketEvent(CastLoggingEvent event,
+                         uint32 rtp_timestamp,
+                         uint32 frame_id,
+                         uint16 packet_id,
+                         uint16 max_packet_id,
+                         size_t size);
+
+  // Records a scalar metric sample (e.g. RTT) for |event|.
+  void InsertGenericEvent(CastLoggingEvent event, int value);
+
+  // Get raw log data. Each getter returns a copy of the internal map.
+  FrameRawMap GetFrameData() const;
+  PacketRawMap GetPacketData() const;
+  GenericRawMap GetGenericData() const;
+
+  // Reset all log data.
+  void Reset();
+
+ private:
+  // Shared helper for the InsertFrameEvent* methods. NOTE: takes |frame_id|
+  // before |rtp_timestamp| - the reverse of the public methods.
+  void InsertBaseFrameEvent(CastLoggingEvent event,
+                            uint32 frame_id,
+                            uint32 rtp_timestamp);
+
+  base::TickClock* const clock_;  // Not owned by this class.
+  FrameRawMap frame_map_;
+  PacketRawMap packet_map_;
+  GenericRawMap generic_map_;
+  base::WeakPtrFactory<LoggingRaw> weak_factory_;
+
+  DISALLOW_COPY_AND_ASSIGN(LoggingRaw);
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_LOGGING_LOGGING_RAW_H_
+
diff --git a/chromium/media/cast/logging/logging_stats.cc b/chromium/media/cast/logging/logging_stats.cc
new file mode 100644
index 00000000000..84fdbf7a615
--- /dev/null
+++ b/chromium/media/cast/logging/logging_stats.cc
@@ -0,0 +1,150 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/linked_ptr.h"
+#include "base/memory/scoped_ptr.h"
+#include "media/cast/logging/logging_stats.h"
+
+namespace media {
+namespace cast {
+
+// |clock| is not owned and must outlive this object.
+LoggingStats::LoggingStats(base::TickClock* clock)
+    : frame_stats_(),
+      packet_stats_(),
+      generic_stats_(),
+      start_time_(),
+      clock_(clock) {
+  memset(counts_, 0, sizeof(counts_));
+  // Note: |start_time_| is an array of base::TimeTicks (a non-trivial
+  // class), already value-initialized by start_time_() above. The previous
+  // memset() over it wrote through the object representation of a
+  // non-trivial type and was redundant, so it has been removed.
+}
+
+LoggingStats::~LoggingStats() {}
+
+// Clears all accumulated stats and counters. |start_time_| entries are left
+// stale; each is overwritten the next time its event type is inserted.
+void LoggingStats::Reset() {
+  frame_stats_.clear();
+  packet_stats_.clear();
+  generic_stats_.clear();
+  memset(counts_, 0, sizeof(counts_));
+}
+
+// Counts a frame event that carries no extra payload.
+void LoggingStats::InsertFrameEvent(CastLoggingEvent event,
+                                    uint32 rtp_timestamp,
+                                    uint32 frame_id) {
+  InsertBaseFrameEvent(event, frame_id, rtp_timestamp);
+}
+
+// Counts a frame event and accumulates |frame_size| (bytes) into
+// bitrate_kbps; the sum is converted to an actual rate in
+// GetFrameStatsData().
+void LoggingStats::InsertFrameEventWithSize(CastLoggingEvent event,
+                                            uint32 rtp_timestamp,
+                                            uint32 frame_id,
+                                            int frame_size) {
+  InsertBaseFrameEvent(event, frame_id, rtp_timestamp);
+  // Update size. The entry was created by InsertBaseFrameEvent if missing.
+  FrameStatsMap::iterator it = frame_stats_.find(event);
+  DCHECK(it != frame_stats_.end());
+  it->second->bitrate_kbps += frame_size;
+}
+
+// Counts a frame event and folds |delay| into the min/max/avg delay stats.
+void LoggingStats::InsertFrameEventWithDelay(CastLoggingEvent event,
+                                             uint32 rtp_timestamp,
+                                             uint32 frame_id,
+                                             base::TimeDelta delay) {
+  InsertBaseFrameEvent(event, frame_id, rtp_timestamp);
+  // Update delay stats.
+  FrameStatsMap::iterator it = frame_stats_.find(event);
+  DCHECK(it != frame_stats_.end());
+  // Using the average delay as a counter, will divide by the counter when
+  // triggered.
+  it->second->avg_delay_ms += delay.InMilliseconds();
+  if (delay.InMilliseconds() > it->second->max_delay_ms)
+    it->second->max_delay_ms = delay.InMilliseconds();
+  // counts_[event] == 1 means this is the first sample, so min must be set
+  // unconditionally.
+  if ((delay.InMilliseconds() < it->second->min_delay_ms) ||
+      (counts_[event] == 1))
+    it->second->min_delay_ms = delay.InMilliseconds();
+}
+
+// Creates the per-event stats entry on first sight of |event| and bumps the
+// event count. |frame_id| and |rtp_timestamp| are currently unused here -
+// stats are aggregated per event type only.
+void LoggingStats::InsertBaseFrameEvent(CastLoggingEvent event,
+                                        uint32 frame_id,
+                                        uint32 rtp_timestamp) {
+  // Does this belong to an existing event?
+  FrameStatsMap::iterator it = frame_stats_.find(event);
+  if (it == frame_stats_.end()) {
+    // New event: remember when this event type was first seen, for rate
+    // computations in GetFrameStatsData().
+    start_time_[event] = clock_->NowTicks();
+    linked_ptr<FrameLogStats> stats(new FrameLogStats());
+    frame_stats_.insert(std::make_pair(event, stats));
+  }
+
+  ++counts_[event];
+}
+
+// Accumulates |size| (bytes) per event type; converted to a bitrate in
+// GetPacketStatsData(). The rtp/frame/packet ids are unused for stats.
+void LoggingStats::InsertPacketEvent(CastLoggingEvent event,
+                                     uint32 rtp_timestamp,
+                                     uint32 frame_id,
+                                     uint16 packet_id,
+                                     uint16 max_packet_id,
+                                     size_t size) {
+  // Does this packet belong to an existing event?
+  PacketStatsMap::iterator it = packet_stats_.find(event);
+  if (it == packet_stats_.end()) {
+    // New event: remember when this event type was first seen.
+    start_time_[event] = clock_->NowTicks();
+    packet_stats_.insert(std::make_pair(event, size));
+  } else {
+    // Add to existing.
+    it->second += size;
+  }
+  ++counts_[event];
+}
+
+// Accumulates |value| per event type; the sum is turned into an average in
+// GetGenericStatsData().
+void LoggingStats::InsertGenericEvent(CastLoggingEvent event, int value) {
+  // Does this event belong to an existing event?
+  GenericStatsMap::iterator it = generic_stats_.find(event);
+  if (it == generic_stats_.end()) {
+    // New event: remember when this event type was first seen.
+    start_time_[event] = clock_->NowTicks();
+    generic_stats_.insert(std::make_pair(event, value));
+  } else {
+    // Add to existing (will be used to compute average).
+    it->second += value;
+  }
+  ++counts_[event];
+}
+
+// Finalizes and returns the per-event frame statistics. Framerate, bitrate
+// and average delay are computed at call time from the accumulated sums.
+// Note: this mutates the stored values in place.
+const FrameStatsMap* LoggingStats::GetFrameStatsData() {
+  // Compute framerate and bitrate (when available).
+  FrameStatsMap::iterator it;
+  for (it = frame_stats_.begin(); it != frame_stats_.end(); ++it) {
+    base::TimeDelta time_diff = clock_->NowTicks() - start_time_[it->first];
+    it->second->framerate_fps = counts_[it->first] / time_diff.InSecondsF();
+    if (it->second->bitrate_kbps > 0) {
+      // bitrate_kbps holds accumulated bytes; convert to kilobits/second.
+      // Bug fix: (8 / 1000) is integer division and always evaluated to 0,
+      // which zeroed every computed bitrate.
+      it->second->bitrate_kbps = (8.0 / 1000) *
+          it->second->bitrate_kbps / time_diff.InSecondsF();
+    }
+    if (it->second->avg_delay_ms > 0)
+      it->second->avg_delay_ms /= counts_[it->first];
+  }
+  return &frame_stats_;
+}
+
+// Converts each event's accumulated byte count into kilobits per second,
+// measured since the event type was first seen, and returns the map.
+// Note: this mutates the stored values in place.
+const PacketStatsMap* LoggingStats::GetPacketStatsData() {
+  PacketStatsMap::iterator it;
+  for (it = packet_stats_.begin(); it != packet_stats_.end(); ++it) {
+    if (counts_[it->first] == 0) continue;
+    base::TimeDelta time_diff = clock_->NowTicks() - start_time_[it->first];
+    // Bug fix: (8 / 1000) is integer division and always evaluated to 0,
+    // which zeroed every computed bitrate.
+    it->second = (8.0 / 1000) * it->second / time_diff.InSecondsF();
+  }
+  return &packet_stats_;
+}
+
+// Converts accumulated sums into averages in place and returns the map.
+// NOTE(review): calling this twice divides the stored sums again, skewing
+// the averages; callers appear expected to call it once per Reset() cycle -
+// verify against callers.
+const GenericStatsMap* LoggingStats::GetGenericStatsData() {
+  // Compute averages.
+  GenericStatsMap::iterator it;
+  for (it = generic_stats_.begin(); it != generic_stats_.end(); ++it) {
+    it->second /= counts_[it->first];
+  }
+  return &generic_stats_;
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/logging/logging_stats.h b/chromium/media/cast/logging/logging_stats.h
new file mode 100644
index 00000000000..f08649cc777
--- /dev/null
+++ b/chromium/media/cast/logging/logging_stats.h
@@ -0,0 +1,75 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_LOGGING_LOGGING_STATS_H_
+#define MEDIA_CAST_LOGGING_LOGGING_STATS_H_
+
+#include "base/basictypes.h"
+#include "base/time/tick_clock.h"
+#include "base/time/time.h"
+#include "media/cast/logging/logging_defines.h"
+
+namespace media {
+namespace cast {
+
+// Aggregates cast logging events into per-event-type statistics (counts,
+// rates, delays). Raw per-frame/per-packet data is kept by LoggingRaw.
+class LoggingStats {
+ public:
+  // |clock| is not owned and must outlive this object.
+  explicit LoggingStats(base::TickClock* clock);
+
+  ~LoggingStats();
+
+  // Clears all accumulated statistics and counters.
+  void Reset();
+
+  void InsertFrameEvent(CastLoggingEvent event,
+                        uint32 rtp_timestamp,
+                        uint32 frame_id);
+
+  void InsertFrameEventWithSize(CastLoggingEvent event,
+                                uint32 rtp_timestamp,
+                                uint32 frame_id,
+                                int frame_size);
+
+  void InsertFrameEventWithDelay(CastLoggingEvent event,
+                                 uint32 rtp_timestamp,
+                                 uint32 frame_id,
+                                 base::TimeDelta delay);
+
+  void InsertPacketEvent(CastLoggingEvent event,
+                         uint32 rtp_timestamp,
+                         uint32 frame_id,
+                         uint16 packet_id,
+                         uint16 max_packet_id,
+                         size_t size);
+
+  void InsertGenericEvent(CastLoggingEvent event, int value);
+
+  // Get log stats: some of the values, such as frame rate and bit rates are
+  // computed at the time of the call.
+  const FrameStatsMap* GetFrameStatsData();
+
+  const PacketStatsMap* GetPacketStatsData();
+
+  const GenericStatsMap* GetGenericStatsData();
+
+ private:
+  // Shared helper for the InsertFrameEvent* methods; creates the stats
+  // entry on first sight of |event| and bumps its count.
+  void InsertBaseFrameEvent(CastLoggingEvent event,
+                            uint32 frame_id,
+                            uint32 rtp_timestamp);
+  FrameStatsMap frame_stats_;
+  PacketStatsMap packet_stats_;
+  GenericStatsMap generic_stats_;
+  // Every event has an individual start time
+  base::TimeTicks start_time_[kNumOfLoggingEvents];
+  // Keep track of event counts.
+  int counts_[kNumOfLoggingEvents];
+  base::TickClock* const clock_;  // Not owned by this class.
+
+  DISALLOW_COPY_AND_ASSIGN(LoggingStats);
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_LOGGING_LOGGING_STATS_H_
+
diff --git a/chromium/media/cast/logging/logging_unittest.cc b/chromium/media/cast/logging/logging_unittest.cc
new file mode 100644
index 00000000000..5ce760ec4c7
--- /dev/null
+++ b/chromium/media/cast/logging/logging_unittest.cc
@@ -0,0 +1,248 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <gtest/gtest.h>
+
+#include "base/rand_util.h"
+#include "base/test/simple_test_tick_clock.h"
+#include "base/time/tick_clock.h"
+#include "base/time/time.h"
+#include "media/cast/logging/logging_impl.h"
+
+
+namespace media {
+namespace cast {
+
+// Duration over which frames are inserted - one second.
+const int64 kIntervalTime1S = 1;
+// Test frame rate goal - 30fps.
+const int kFrameIntervalMs = 33;
+
+// Arbitrary non-zero start time for the test tick clock.
+static const int64 kStartMillisecond = GG_INT64_C(12345678900000);
+
+// Fixture owning a LoggingImpl driven by a SimpleTestTickClock.
+class TestLogging : public ::testing::Test {
+ protected:
+  TestLogging()
+      // Enable logging, disable tracing and uma.
+      : logging_(&testing_clock_, true, false, false) {
+    testing_clock_.Advance(
+        base::TimeDelta::FromMilliseconds(kStartMillisecond));
+  }
+
+  virtual ~TestLogging() {}
+
+  // Declared before |logging_| so the clock is fully constructed before its
+  // address is handed to LoggingImpl (members are constructed in
+  // declaration order; previously |logging_| received a pointer to a
+  // not-yet-constructed clock).
+  base::SimpleTestTickClock testing_clock_;
+  LoggingImpl logging_;
+};
+
+// Logs a single frame event type at ~30 fps for one simulated second, then
+// verifies the raw map holds one entry per frame and the computed stats.
+TEST_F(TestLogging, BasicFrameLogging) {
+  base::TimeTicks start_time = testing_clock_.NowTicks();
+  base::TimeDelta time_interval = testing_clock_.NowTicks() - start_time;
+  uint32 rtp_timestamp = 0;
+  uint32 frame_id = 0;
+  do {
+    logging_.InsertFrameEvent(kAudioFrameCaptured, rtp_timestamp, frame_id);
+    testing_clock_.Advance(
+        base::TimeDelta::FromMilliseconds(kFrameIntervalMs));
+    rtp_timestamp += kFrameIntervalMs * 90;  // 90 kHz RTP clock.
+    ++frame_id;
+    time_interval = testing_clock_.NowTicks() - start_time;
+  } while (time_interval.InSeconds() < kIntervalTime1S);
+  // Get logging data.
+  FrameRawMap frame_map = logging_.GetFrameRawData();
+  // Size of map should be equal to the number of frames logged.
+  EXPECT_EQ(frame_id, frame_map.size());
+  // Verify stats.
+  const FrameStatsMap* frame_stats = logging_.GetFrameStatsData();
+  // Size of stats equals the number of events.
+  EXPECT_EQ(1u, frame_stats->size());
+  FrameStatsMap::const_iterator it = frame_stats->find(kAudioFrameCaptured);
+  EXPECT_TRUE(it != frame_stats->end());
+  EXPECT_NEAR(30.3, it->second->framerate_fps, 0.1);
+  // No size or delay was logged, so those stats stay at zero.
+  EXPECT_EQ(0, it->second->bitrate_kbps);
+  EXPECT_EQ(0, it->second->max_delay_ms);
+  EXPECT_EQ(0, it->second->min_delay_ms);
+  EXPECT_EQ(0, it->second->avg_delay_ms);
+}
+
+// Logs encoded-frame events with randomized sizes for one second, then
+// checks raw map size and the derived framerate/bitrate statistics.
+TEST_F(TestLogging, FrameLoggingWithSize) {
+  // Average frame size, with random jitter of +/-kRandomSizeInterval bytes.
+  const int kBaseFrameSizeBytes = 25000;
+  const int kRandomSizeInterval = 100;
+  base::TimeTicks start_time = testing_clock_.NowTicks();
+  base::TimeDelta time_interval = testing_clock_.NowTicks() - start_time;
+  uint32 rtp_timestamp = 0;
+  uint32 frame_id = 0;
+  do {
+    int size = kBaseFrameSizeBytes +
+        base::RandInt(-kRandomSizeInterval, kRandomSizeInterval);
+    logging_.InsertFrameEventWithSize(
+        kAudioFrameCaptured, rtp_timestamp, frame_id, size);
+    testing_clock_.Advance(
+        base::TimeDelta::FromMilliseconds(kFrameIntervalMs));
+    rtp_timestamp += kFrameIntervalMs * 90;  // 90 kHz RTP clock.
+    ++frame_id;
+    time_interval = testing_clock_.NowTicks() - start_time;
+  } while (time_interval.InSeconds() < kIntervalTime1S);
+  // Get logging data.
+  FrameRawMap frame_map = logging_.GetFrameRawData();
+  // Size of map should be equal to the number of frames logged.
+  EXPECT_EQ(frame_id, frame_map.size());
+  // Verify stats.
+  const FrameStatsMap* frame_stats = logging_.GetFrameStatsData();
+  // Size of stats equals the number of events.
+  EXPECT_EQ(1u, frame_stats->size());
+  FrameStatsMap::const_iterator it = frame_stats->find(kAudioFrameCaptured);
+  EXPECT_TRUE(it != frame_stats->end());
+  EXPECT_NEAR(30.3, it->second->framerate_fps, 0.1);
+  EXPECT_NEAR(8 * kBaseFrameSizeBytes / (kFrameIntervalMs * 1000),
+              it->second->bitrate_kbps, kRandomSizeInterval);
+  EXPECT_EQ(0, it->second->max_delay_ms);
+  EXPECT_EQ(0, it->second->min_delay_ms);
+  EXPECT_EQ(0, it->second->avg_delay_ms);
+}
+
+// Logs frame events with randomized playout delays for one second, then
+// checks raw map size and the min/max/avg delay statistics.
+TEST_F(TestLogging, FrameLoggingWithDelay) {
+  // Average playout delay, with random jitter of +/-kRandomSizeInterval ms.
+  const int kPlayoutDelayMs = 50;
+  const int kRandomSizeInterval = 20;
+  base::TimeTicks start_time = testing_clock_.NowTicks();
+  base::TimeDelta time_interval = testing_clock_.NowTicks() - start_time;
+  uint32 rtp_timestamp = 0;
+  uint32 frame_id = 0;
+  do {
+    int delay = kPlayoutDelayMs +
+        base::RandInt(-kRandomSizeInterval, kRandomSizeInterval);
+    logging_.InsertFrameEventWithDelay(
+        kAudioFrameCaptured, rtp_timestamp, frame_id,
+        base::TimeDelta::FromMilliseconds(delay));
+    testing_clock_.Advance(
+        base::TimeDelta::FromMilliseconds(kFrameIntervalMs));
+    rtp_timestamp += kFrameIntervalMs * 90;  // 90 kHz RTP clock.
+    ++frame_id;
+    time_interval = testing_clock_.NowTicks() - start_time;
+  } while (time_interval.InSeconds() < kIntervalTime1S);
+  // Get logging data.
+  FrameRawMap frame_map = logging_.GetFrameRawData();
+  // Size of map should be equal to the number of frames logged.
+  EXPECT_EQ(frame_id, frame_map.size());
+  // Verify stats.
+  const FrameStatsMap* frame_stats = logging_.GetFrameStatsData();
+  // Size of stats equals the number of events.
+  EXPECT_EQ(1u, frame_stats->size());
+  FrameStatsMap::const_iterator it = frame_stats->find(kAudioFrameCaptured);
+  EXPECT_TRUE(it != frame_stats->end());
+  EXPECT_NEAR(30.3, it->second->framerate_fps, 0.1);
+  EXPECT_EQ(0, it->second->bitrate_kbps);
+  EXPECT_GE(kPlayoutDelayMs + kRandomSizeInterval, it->second->max_delay_ms);
+  EXPECT_LE(kPlayoutDelayMs - kRandomSizeInterval, it->second->min_delay_ms);
+  EXPECT_NEAR(kPlayoutDelayMs, it->second->avg_delay_ms,
+              0.2 * kRandomSizeInterval);
+}
+
+// Mixes several event types across frames; checks only that the raw map has
+// one entry per frame (multiple events fold into each frame's entry).
+TEST_F(TestLogging, MultipleEventFrameLogging) {
+  base::TimeTicks start_time = testing_clock_.NowTicks();
+  base::TimeDelta time_interval = testing_clock_.NowTicks() - start_time;
+  uint32 rtp_timestamp = 0;
+  uint32 frame_id = 0;
+  do {
+    logging_.InsertFrameEvent(kAudioFrameCaptured, rtp_timestamp, frame_id);
+    if (frame_id % 2) {
+      logging_.InsertFrameEventWithSize(
+          kAudioFrameEncoded, rtp_timestamp, frame_id, 1500);
+    } else if (frame_id % 3) {
+      logging_.InsertFrameEvent(kVideoFrameDecoded, rtp_timestamp, frame_id);
+    } else {
+      logging_.InsertFrameEventWithDelay(
+          kVideoRenderDelay, rtp_timestamp, frame_id,
+          base::TimeDelta::FromMilliseconds(20));
+    }
+    testing_clock_.Advance(
+        base::TimeDelta::FromMilliseconds(kFrameIntervalMs));
+    rtp_timestamp += kFrameIntervalMs * 90;  // 90 kHz RTP clock.
+    ++frame_id;
+    time_interval = testing_clock_.NowTicks() - start_time;
+  } while (time_interval.InSeconds() < kIntervalTime1S);
+  // Get logging data.
+  FrameRawMap frame_map = logging_.GetFrameRawData();
+  // Size of map should be equal to the number of frames logged.
+  EXPECT_EQ(frame_id, frame_map.size());
+  // Multiple events captured per frame.
+}
+
+// Logs kNumPacketsPerFrame packets per frame for one second and verifies
+// the raw map size and the computed packet bitrate statistic.
+TEST_F(TestLogging, PacketLogging) {
+  const int kNumPacketsPerFrame = 10;
+  const int kBaseSize = 2500;
+  const int kSizeInterval = 100;
+  base::TimeTicks start_time = testing_clock_.NowTicks();
+  base::TimeDelta time_interval = testing_clock_.NowTicks() - start_time;
+  uint32 rtp_timestamp = 0;
+  uint32 frame_id = 0;
+  do {
+    for (int i = 0; i < kNumPacketsPerFrame; ++i) {
+      int size = kBaseSize + base::RandInt(-kSizeInterval, kSizeInterval);
+      logging_.InsertPacketEvent(kPacketSentToPacer, rtp_timestamp, frame_id,
+                                 i, kNumPacketsPerFrame, size);
+    }
+    testing_clock_.Advance(
+        base::TimeDelta::FromMilliseconds(kFrameIntervalMs));
+    rtp_timestamp += kFrameIntervalMs * 90;  // 90 kHz RTP clock.
+    ++frame_id;
+    time_interval = testing_clock_.NowTicks() - start_time;
+  } while (time_interval.InSeconds() < kIntervalTime1S);
+  // Get logging data.
+  PacketRawMap raw_map = logging_.GetPacketRawData();
+  // Size of map should be equal to the number of frames logged.
+  EXPECT_EQ(frame_id, raw_map.size());
+  // Verify stats.
+  const PacketStatsMap* stats_map = logging_.GetPacketStatsData();
+  // Size of stats equals the number of events.
+  EXPECT_EQ(1u, stats_map->size());
+  PacketStatsMap::const_iterator it = stats_map->find(kPacketSentToPacer);
+  EXPECT_TRUE(it != stats_map->end());
+  // We only store the bitrate as a packet statistic.
+  EXPECT_NEAR(8 * kNumPacketsPerFrame * kBaseSize / (kFrameIntervalMs * 1000),
+              it->second, kSizeInterval);
+}
+
+// Inserts three generic metrics at different rates and verifies both the
+// raw sample counts and the computed per-metric averages.
+TEST_F(TestLogging, GenericLogging) {
+  // Insert multiple generic types.
+  const int kNumRuns = 1000;
+  const int kBaseValue = 20;
+  for (int i = 0; i < kNumRuns; ++i) {
+    int value = kBaseValue + base::RandInt(-5, 5);
+    logging_.InsertGenericEvent(kRtt, value);
+    if (i % 2) {
+      logging_.InsertGenericEvent(kPacketLoss, value);
+    }
+    if (!(i % 4)) {
+      logging_.InsertGenericEvent(kJitter, value);
+    }
+  }
+  GenericRawMap raw_map = logging_.GetGenericRawData();
+  const GenericStatsMap* stats_map = logging_.GetGenericStatsData();
+  // Size of generic map = number of different events.
+  EXPECT_EQ(3u, raw_map.size());
+  EXPECT_EQ(3u, stats_map->size());
+  // Raw events - size of internal map = number of calls.
+  GenericRawMap::iterator rit = raw_map.find(kRtt);
+  EXPECT_EQ(kNumRuns, rit->second.value.size());
+  EXPECT_EQ(kNumRuns, rit->second.timestamp.size());
+  rit = raw_map.find(kPacketLoss);
+  EXPECT_EQ(kNumRuns / 2, rit->second.value.size());
+  EXPECT_EQ(kNumRuns / 2, rit->second.timestamp.size());
+  // kJitter fires when i % 4 == 0, i.e. kNumRuns / 4 times.
+  rit = raw_map.find(kJitter);
+  EXPECT_EQ(kNumRuns / 4, rit->second.value.size());
+  EXPECT_EQ(kNumRuns / 4, rit->second.timestamp.size());
+  // Stats - one value per event.
+  GenericStatsMap::const_iterator sit = stats_map->find(kRtt);
+  EXPECT_NEAR(kBaseValue, sit->second, 2.5);
+  sit = stats_map->find(kPacketLoss);
+  EXPECT_NEAR(kBaseValue, sit->second, 2.5);
+  sit = stats_map->find(kJitter);
+  EXPECT_NEAR(kBaseValue, sit->second, 2.5);
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/net/cast_net_defines.h b/chromium/media/cast/net/cast_net_defines.h
new file mode 100644
index 00000000000..a9f1629a91a
--- /dev/null
+++ b/chromium/media/cast/net/cast_net_defines.h
@@ -0,0 +1,81 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_NET_CAST_NET_DEFINES_H_
+#define MEDIA_CAST_NET_CAST_NET_DEFINES_H_
+
+#include "base/basictypes.h"
+
+namespace media {
+namespace cast {
+
+// Expands 8-bit over-the-wire frame ids into 32-bit ids by tracking
+// wrap-arounds. The 8-bit space is split into three ranges (low / middle /
+// high); observing a jump from the high range back into the low range is
+// treated as a wrap and increments the wrap count.
+class FrameIdWrapHelper {
+ public:
+  FrameIdWrapHelper()
+      : first_(true),
+        frame_id_wrap_count_(0),
+        range_(kLowRange) {}
+
+  // Maps |over_the_wire_frame_id| onto the full 32-bit id space:
+  // (wrap_count << 8) + id.
+  uint32 MapTo32bitsFrameId(const uint8 over_the_wire_frame_id) {
+    if (first_) {
+      first_ = false;
+      if (over_the_wire_frame_id == 0xff) {
+        // Special case for startup.
+        return kStartFrameId;
+      }
+    }
+
+    uint32 wrap_count = frame_id_wrap_count_;
+    switch (range_) {
+      case kLowRange:
+        if (over_the_wire_frame_id > kLowRangeThreshold &&
+            over_the_wire_frame_id < kHighRangeThreshold) {
+          range_ = kMiddleRange;
+        }
+        if (over_the_wire_frame_id > kHighRangeThreshold) {
+          // Wrap count was incremented in High->Low transition, but this frame
+          // is 'old', actually from before the wrap count got incremented.
+          --wrap_count;
+        }
+        break;
+      case kMiddleRange:
+        if (over_the_wire_frame_id > kHighRangeThreshold) {
+          range_ = kHighRange;
+        }
+        break;
+      case kHighRange:
+        if (over_the_wire_frame_id < kLowRangeThreshold) {
+          // Wrap-around detected.
+          range_ = kLowRange;
+          ++frame_id_wrap_count_;
+          // Frame triggering wrap-around so wrap count should be incremented as
+          // as well to match |frame_id_wrap_count_|.
+          ++wrap_count;
+        }
+        break;
+    }
+    return (wrap_count << 8) + over_the_wire_frame_id;
+  }
+
+ private:
+  enum Range {
+    kLowRange,
+    kMiddleRange,
+    kHighRange,
+  };
+
+  // Ids strictly between the thresholds are "middle range"; ids at or below
+  // kLowRangeThreshold are low, at or above kHighRangeThreshold are high.
+  static const uint8 kLowRangeThreshold = 0x0f;
+  static const uint8 kHighRangeThreshold = 0xf0;
+  static const uint32 kStartFrameId = GG_UINT32_C(0xffffffff);
+
+  bool first_;                  // True until the first id has been mapped.
+  uint32 frame_id_wrap_count_;  // Number of 8-bit wrap-arounds observed.
+  Range range_;
+};
+
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_NET_CAST_NET_DEFINES_H_
diff --git a/chromium/media/cast/net/frame_id_wrap_helper_test.cc b/chromium/media/cast/net/frame_id_wrap_helper_test.cc
new file mode 100644
index 00000000000..f6b89b01d22
--- /dev/null
+++ b/chromium/media/cast/net/frame_id_wrap_helper_test.cc
@@ -0,0 +1,48 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <gtest/gtest.h>
+#include "media/cast/net/cast_net_defines.h"
+
+namespace media {
+namespace cast {
+
+// Test fixture holding a fresh FrameIdWrapHelper per test.
+class FrameIdWrapHelperTest : public ::testing::Test {
+ protected:
+  FrameIdWrapHelperTest() {}
+  virtual ~FrameIdWrapHelperTest() {}
+
+  FrameIdWrapHelper frame_id_wrap_helper_;
+};
+
+// An initial over-the-wire id of 255 (0xff) is the startup special case and
+// maps to kStartFrameId.
+TEST_F(FrameIdWrapHelperTest, FirstFrame) {
+  EXPECT_EQ(kStartFrameId, frame_id_wrap_helper_.MapTo32bitsFrameId(255u));
+}
+
+// Feeding 257 consecutive ids (0..255, then 0 again) must register exactly
+// one wrap: the final mapped id is 256.
+TEST_F(FrameIdWrapHelperTest, Rollover) {
+  uint32 new_frame_id = 0u;
+  for (int i = 0; i <= 256; ++i) {
+    new_frame_id = frame_id_wrap_helper_.MapTo32bitsFrameId(
+        static_cast<uint8>(i));
+  }
+  EXPECT_EQ(256u, new_frame_id);
+}
+
+// A late id from before the wrap (255 arriving after the wrap to 0) must
+// map below the post-wrap ids rather than jumping a full cycle ahead.
+TEST_F(FrameIdWrapHelperTest, OutOfOrder) {
+  uint32 new_frame_id = 0u;
+  for (int i = 0; i < 255; ++i) {
+    new_frame_id = frame_id_wrap_helper_.MapTo32bitsFrameId(
+        static_cast<uint8>(i));
+  }
+  EXPECT_EQ(254u, new_frame_id);
+  new_frame_id = frame_id_wrap_helper_.MapTo32bitsFrameId(0u);
+  EXPECT_EQ(256u, new_frame_id);
+  // Old frame from before the wrap arrives late.
+  new_frame_id = frame_id_wrap_helper_.MapTo32bitsFrameId(255u);
+  EXPECT_EQ(255u, new_frame_id);
+  new_frame_id = frame_id_wrap_helper_.MapTo32bitsFrameId(1u);
+  EXPECT_EQ(257u, new_frame_id);
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/net/pacing/mock_paced_packet_sender.cc b/chromium/media/cast/net/pacing/mock_paced_packet_sender.cc
new file mode 100644
index 00000000000..6caf8f6390e
--- /dev/null
+++ b/chromium/media/cast/net/pacing/mock_paced_packet_sender.cc
@@ -0,0 +1,17 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/net/pacing/mock_paced_packet_sender.h"
+
+namespace media {
+namespace cast {
+
+// Out-of-line constructor/destructor keep the gmock-generated code out of
+// every translation unit that includes the header.
+MockPacedPacketSender::MockPacedPacketSender() {
+}
+
+MockPacedPacketSender::~MockPacedPacketSender() {
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/net/pacing/mock_paced_packet_sender.h b/chromium/media/cast/net/pacing/mock_paced_packet_sender.h
new file mode 100644
index 00000000000..9933516f14c
--- /dev/null
+++ b/chromium/media/cast/net/pacing/mock_paced_packet_sender.h
@@ -0,0 +1,27 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_NET_PACING_MOCK_PACED_PACKET_SENDER_H_
+#define MEDIA_CAST_NET_PACING_MOCK_PACED_PACKET_SENDER_H_
+
+#include "media/cast/net/pacing/paced_sender.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace media {
+namespace cast {
+
+// GoogleMock implementation of the PacedPacketSender interface for tests.
+class MockPacedPacketSender : public PacedPacketSender {
+ public:
+  MockPacedPacketSender();
+  virtual ~MockPacedPacketSender();
+
+  MOCK_METHOD1(SendPackets, bool(const PacketList& packets));
+  MOCK_METHOD1(ResendPackets, bool(const PacketList& packets));
+  MOCK_METHOD1(SendRtcpPacket, bool(const Packet& packet));
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_NET_PACING_MOCK_PACED_PACKET_SENDER_H_
diff --git a/chromium/media/cast/net/pacing/paced_sender.cc b/chromium/media/cast/net/pacing/paced_sender.cc
new file mode 100644
index 00000000000..8a07380df0d
--- /dev/null
+++ b/chromium/media/cast/net/pacing/paced_sender.cc
@@ -0,0 +1,148 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/net/pacing/paced_sender.h"
+
+#include "base/bind.h"
+#include "base/message_loop/message_loop.h"
+
+namespace media {
+namespace cast {
+
+static const int64 kPacingIntervalMs = 10;
+// Each frame will be split into no more than kPacingMaxBurstsPerFrame
+// bursts of packets.
+static const size_t kPacingMaxBurstsPerFrame = 3;
+
+PacedSender::PacedSender(scoped_refptr<CastEnvironment> cast_environment,
+ PacketSender* transport)
+ : cast_environment_(cast_environment),
+ burst_size_(1),
+ packets_sent_in_burst_(0),
+ transport_(transport),
+ weak_factory_(this) {
+ ScheduleNextSend();
+}
+
+PacedSender::~PacedSender() {}
+
+bool PacedSender::SendPackets(const PacketList& packets) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ cast_environment_->Logging()->InsertPacketListEvent(kPacketSentToPacer,
+ packets);
+ return SendPacketsToTransport(packets, &packet_list_);
+}
+
+bool PacedSender::ResendPackets(const PacketList& packets) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ cast_environment_->Logging()->InsertPacketListEvent(kPacketRetransmited,
+ packets);
+ return SendPacketsToTransport(packets, &resend_packet_list_);
+}
+
+bool PacedSender::SendPacketsToTransport(const PacketList& packets,
+ PacketList* packets_not_sent) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ UpdateBurstSize(packets.size());
+
+ if (!packets_not_sent->empty()) {
+ packets_not_sent->insert(packets_not_sent->end(),
+ packets.begin(), packets.end());
+ return true;
+ }
+ PacketList packets_to_send;
+ PacketList::const_iterator first_to_store_it = packets.begin();
+
+ size_t max_packets_to_send_now = burst_size_ - packets_sent_in_burst_;
+ if (max_packets_to_send_now > 0) {
+ size_t packets_to_send_now = std::min(max_packets_to_send_now,
+ packets.size());
+
+ std::advance(first_to_store_it, packets_to_send_now);
+ packets_to_send.insert(packets_to_send.begin(),
+ packets.begin(), first_to_store_it);
+ }
+ packets_not_sent->insert(packets_not_sent->end(),
+ first_to_store_it, packets.end());
+ packets_sent_in_burst_ += packets_to_send.size();
+ if (packets_to_send.empty()) return true;
+
+ cast_environment_->Logging()->InsertPacketListEvent(kPacketSentToNetwork,
+ packets);
+ return transport_->SendPackets(packets_to_send);
+}
+
+bool PacedSender::SendRtcpPacket(const Packet& packet) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ // We pass the RTCP packets straight through.
+ return transport_->SendPacket(packet);
+}
+
+void PacedSender::ScheduleNextSend() {
+ base::TimeDelta time_to_next = time_last_process_ -
+ cast_environment_->Clock()->NowTicks() +
+ base::TimeDelta::FromMilliseconds(kPacingIntervalMs);
+
+ time_to_next = std::max(time_to_next, base::TimeDelta());
+
+ cast_environment_->PostDelayedTask(CastEnvironment::MAIN, FROM_HERE,
+ base::Bind(&PacedSender::SendNextPacketBurst, weak_factory_.GetWeakPtr()),
+ time_to_next);
+}
+
+void PacedSender::SendNextPacketBurst() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ SendStoredPackets();
+ time_last_process_ = cast_environment_->Clock()->NowTicks();
+ ScheduleNextSend();
+}
+
+void PacedSender::SendStoredPackets() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ if (packet_list_.empty() && resend_packet_list_.empty()) return;
+
+ size_t packets_to_send = burst_size_;
+ PacketList packets_to_resend;
+
+ // Send our re-send packets first.
+ if (!resend_packet_list_.empty()) {
+ PacketList::iterator it = resend_packet_list_.begin();
+ size_t packets_to_send_now = std::min(packets_to_send,
+ resend_packet_list_.size());
+ std::advance(it, packets_to_send_now);
+ packets_to_resend.insert(packets_to_resend.begin(),
+ resend_packet_list_.begin(), it);
+ resend_packet_list_.erase(resend_packet_list_.begin(), it);
+ packets_to_send -= packets_to_resend.size();
+ }
+ if (!packet_list_.empty() && packets_to_send > 0) {
+ PacketList::iterator it = packet_list_.begin();
+ size_t packets_to_send_now = std::min(packets_to_send,
+ packet_list_.size());
+
+ std::advance(it, packets_to_send_now);
+ packets_to_resend.insert(packets_to_resend.end(),
+ packet_list_.begin(), it);
+ packet_list_.erase(packet_list_.begin(), it);
+
+ if (packet_list_.empty()) {
+ burst_size_ = 1; // Reset burst size after we sent the last stored packet
+ packets_sent_in_burst_ = 0;
+ }
+ }
+ transport_->SendPackets(packets_to_resend);
+}
+
+void PacedSender::UpdateBurstSize(size_t packets_to_send) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ packets_to_send = std::max(packets_to_send,
+ resend_packet_list_.size() + packet_list_.size());
+
+ packets_to_send += (kPacingMaxBurstsPerFrame - 1); // Round up.
+ burst_size_ = std::max(packets_to_send / kPacingMaxBurstsPerFrame,
+ burst_size_);
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/pacing/paced_sender.gyp b/chromium/media/cast/net/pacing/paced_sender.gyp
index 53a1cdb1ef8..1947dd4ec40 100644
--- a/chromium/media/cast/pacing/paced_sender.gyp
+++ b/chromium/media/cast/net/pacing/paced_sender.gyp
@@ -5,7 +5,7 @@
{
'targets': [
{
- 'target_name': 'paced_sender',
+ 'target_name': 'cast_paced_sender',
'type': 'static_library',
'include_dirs': [
'<(DEPTH)/',
@@ -16,7 +16,6 @@
],
'dependencies': [
'<(DEPTH)/base/base.gyp:base',
- '<(DEPTH)/base/base.gyp:test_support_base',
],
},
], # targets
diff --git a/chromium/media/cast/pacing/paced_sender.h b/chromium/media/cast/net/pacing/paced_sender.h
index 9dcd03e8469..89283257134 100644
--- a/chromium/media/cast/pacing/paced_sender.h
+++ b/chromium/media/cast/net/pacing/paced_sender.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_CAST_PACING_PACED_SENDER_H_
-#define MEDIA_CAST_PACING_PACED_SENDER_H_
+#ifndef MEDIA_CAST_NET_PACING_PACED_SENDER_H_
+#define MEDIA_CAST_NET_PACING_PACED_SENDER_H_
#include <list>
#include <vector>
@@ -16,21 +16,20 @@
#include "base/time/tick_clock.h"
#include "base/time/time.h"
#include "media/cast/cast_config.h"
-#include "media/cast/cast_thread.h"
+#include "media/cast/cast_environment.h"
namespace media {
namespace cast {
+// We have this pure virtual class to enable mocking.
class PacedPacketSender {
public:
// Inform the pacer / sender of the total number of packets.
- virtual bool SendPacket(const std::vector<uint8>& packet,
- int num_of_packets) = 0;
+ virtual bool SendPackets(const PacketList& packets) = 0;
- virtual bool ResendPacket(const std::vector<uint8>& packet,
- int num_of_packets) = 0;
+ virtual bool ResendPackets(const PacketList& packets) = 0;
- virtual bool SendRtcpPacket(const std::vector<uint8>& packet) = 0;
+ virtual bool SendRtcpPacket(const Packet& packet) = 0;
virtual ~PacedPacketSender() {}
};
@@ -39,20 +38,15 @@ class PacedSender : public PacedPacketSender,
public base::NonThreadSafe,
public base::SupportsWeakPtr<PacedSender> {
public:
- PacedSender(scoped_refptr<CastThread> cast_thread, PacketSender* transport);
+ PacedSender(scoped_refptr<CastEnvironment> cast_environment,
+ PacketSender* transport);
virtual ~PacedSender();
- virtual bool SendPacket(const std::vector<uint8>& packet,
- int num_of_packets) OVERRIDE;
+ virtual bool SendPackets(const PacketList& packets) OVERRIDE;
- virtual bool ResendPacket(const std::vector<uint8>& packet,
- int num_of_packets) OVERRIDE;
+ virtual bool ResendPackets(const PacketList& packets) OVERRIDE;
- virtual bool SendRtcpPacket(const std::vector<uint8>& packet) OVERRIDE;
-
- void set_clock(base::TickClock* clock) {
- clock_ = clock;
- }
+ virtual bool SendRtcpPacket(const Packet& packet) OVERRIDE;
protected:
// Schedule a delayed task on the main cast thread when it's time to send the
@@ -63,22 +57,21 @@ class PacedSender : public PacedPacketSender,
void SendNextPacketBurst();
private:
- void SendStoredPacket();
- void UpdateBurstSize(int num_of_packets);
-
- typedef std::list<std::vector<uint8> > PacketList;
-
- scoped_refptr<CastThread> cast_thread_;
- int burst_size_;
- int packets_sent_in_burst_;
+ bool SendPacketsToTransport(const PacketList& packets,
+ PacketList* packets_not_sent);
+ void SendStoredPackets();
+ void UpdateBurstSize(size_t num_of_packets);
+
+ scoped_refptr<CastEnvironment> cast_environment_;
+ size_t burst_size_;
+ size_t packets_sent_in_burst_;
base::TimeTicks time_last_process_;
+ // Note: We can't combine the |packet_list_| and the |resend_packet_list_|
+ // since then we might get reordering of the retransmitted packets.
PacketList packet_list_;
PacketList resend_packet_list_;
PacketSender* transport_;
- base::DefaultTickClock default_tick_clock_;
- base::TickClock* clock_;
-
base::WeakPtrFactory<PacedSender> weak_factory_;
DISALLOW_COPY_AND_ASSIGN(PacedSender);
@@ -87,4 +80,4 @@ class PacedSender : public PacedPacketSender,
} // namespace cast
} // namespace media
-#endif // MEDIA_CAST_PACING_PACED_SENDER_H_
+#endif // MEDIA_CAST_NET_PACING_PACED_SENDER_H_
diff --git a/chromium/media/cast/net/pacing/paced_sender_unittest.cc b/chromium/media/cast/net/pacing/paced_sender_unittest.cc
new file mode 100644
index 00000000000..15b81362f69
--- /dev/null
+++ b/chromium/media/cast/net/pacing/paced_sender_unittest.cc
@@ -0,0 +1,257 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/simple_test_tick_clock.h"
+#include "media/cast/net/pacing/paced_sender.h"
+#include "media/cast/test/fake_task_runner.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace media {
+namespace cast {
+
+using testing::_;
+
+static const uint8 kValue = 123;
+static const size_t kSize1 = 100;
+static const size_t kSize2 = 101;
+static const size_t kSize3 = 102;
+static const size_t kSize4 = 103;
+static const size_t kNackSize = 104;
+static const int64 kStartMillisecond = GG_INT64_C(12345678900000);
+
+class TestPacketSender : public PacketSender {
+ public:
+ virtual bool SendPackets(const PacketList& packets) OVERRIDE {
+ PacketList::const_iterator it = packets.begin();
+ for (; it != packets.end(); ++it) {
+ EXPECT_FALSE(expected_packet_size_.empty());
+ size_t expected_packet_size = expected_packet_size_.front();
+ expected_packet_size_.pop_front();
+ EXPECT_EQ(expected_packet_size, it->size());
+ }
+ return true;
+ }
+
+ virtual bool SendPacket(const Packet& packet) OVERRIDE {
+ return true;
+ }
+
+ void AddExpectedSize(int expected_packet_size, int repeat_count) {
+ for (int i = 0; i < repeat_count; ++i) {
+ expected_packet_size_.push_back(expected_packet_size);
+ }
+ }
+
+ private:
+ std::list<int> expected_packet_size_;
+};
+
+class PacedSenderTest : public ::testing::Test {
+ protected:
+ PacedSenderTest() {
+ testing_clock_.Advance(
+ base::TimeDelta::FromMilliseconds(kStartMillisecond));
+ }
+
+ virtual ~PacedSenderTest() {}
+
+ virtual void SetUp() {
+ task_runner_ = new test::FakeTaskRunner(&testing_clock_);
+ cast_environment_ = new CastEnvironment(&testing_clock_, task_runner_,
+ task_runner_, task_runner_, task_runner_, task_runner_,
+ GetDefaultCastLoggingConfig());
+ paced_sender_.reset(new PacedSender(cast_environment_, &mock_transport_));
+ }
+
+ PacketList CreatePacketList(size_t packet_size, int num_of_packets_in_frame) {
+ PacketList packets;
+ for (int i = 0; i < num_of_packets_in_frame; ++i) {
+ packets.push_back(Packet(packet_size, kValue));
+ }
+ return packets;
+ }
+
+ base::SimpleTestTickClock testing_clock_;
+ TestPacketSender mock_transport_;
+ scoped_refptr<test::FakeTaskRunner> task_runner_;
+ scoped_ptr<PacedSender> paced_sender_;
+ scoped_refptr<CastEnvironment> cast_environment_;
+};
+
+TEST_F(PacedSenderTest, PassThroughRtcp) {
+ mock_transport_.AddExpectedSize(kSize1, 1);
+ PacketList packets = CreatePacketList(kSize1, 1);
+
+ EXPECT_TRUE(paced_sender_->SendPackets(packets));
+ EXPECT_TRUE(paced_sender_->ResendPackets(packets));
+
+ mock_transport_.AddExpectedSize(kSize2, 1);
+ EXPECT_TRUE(paced_sender_->SendRtcpPacket(Packet(kSize2, kValue)));
+}
+
+TEST_F(PacedSenderTest, BasicPace) {
+ int num_of_packets = 9;
+ PacketList packets = CreatePacketList(kSize1, num_of_packets);
+
+ mock_transport_.AddExpectedSize(kSize1, 3);
+ EXPECT_TRUE(paced_sender_->SendPackets(packets));
+
+ // Check that we get the next burst.
+ mock_transport_.AddExpectedSize(kSize1, 3);
+
+ base::TimeDelta timeout = base::TimeDelta::FromMilliseconds(10);
+ testing_clock_.Advance(timeout);
+ task_runner_->RunTasks();
+
+ // If we call process too early make sure we don't send any packets.
+ timeout = base::TimeDelta::FromMilliseconds(5);
+ testing_clock_.Advance(timeout);
+ task_runner_->RunTasks();
+
+ // Check that we get the next burst.
+ mock_transport_.AddExpectedSize(kSize1, 3);
+ testing_clock_.Advance(timeout);
+ task_runner_->RunTasks();
+
+ // Check that we don't get any more packets.
+ testing_clock_.Advance(timeout);
+ task_runner_->RunTasks();
+}
+
+TEST_F(PacedSenderTest, PaceWithNack) {
+ // Testing what happen when we get multiple NACK requests for a fully lost
+ // frames just as we sent the first packets in a frame.
+ int num_of_packets_in_frame = 9;
+ int num_of_packets_in_nack = 9;
+
+ PacketList first_frame_packets =
+ CreatePacketList(kSize1, num_of_packets_in_frame);
+
+ PacketList second_frame_packets =
+ CreatePacketList(kSize2, num_of_packets_in_frame);
+
+ PacketList nack_packets =
+ CreatePacketList(kNackSize, num_of_packets_in_nack);
+
+ // Check that the first burst of the frame go out on the wire.
+ mock_transport_.AddExpectedSize(kSize1, 3);
+ EXPECT_TRUE(paced_sender_->SendPackets(first_frame_packets));
+
+ // Add first NACK request.
+ EXPECT_TRUE(paced_sender_->ResendPackets(nack_packets));
+
+ // Check that we get the first NACK burst.
+ mock_transport_.AddExpectedSize(kNackSize, 5);
+ base::TimeDelta timeout = base::TimeDelta::FromMilliseconds(10);
+ testing_clock_.Advance(timeout);
+ task_runner_->RunTasks();
+
+ // Add second NACK request.
+ EXPECT_TRUE(paced_sender_->ResendPackets(nack_packets));
+
+ // Check that we get the next NACK burst.
+ mock_transport_.AddExpectedSize(kNackSize, 7);
+ testing_clock_.Advance(timeout);
+ task_runner_->RunTasks();
+
+ // End of NACK plus a packet from the oldest frame.
+ mock_transport_.AddExpectedSize(kNackSize, 6);
+ mock_transport_.AddExpectedSize(kSize1, 1);
+ testing_clock_.Advance(timeout);
+ task_runner_->RunTasks();
+
+ // Add second frame.
+ // Make sure we don't delay the second frame due to the previous packets.
+ EXPECT_TRUE(paced_sender_->SendPackets(second_frame_packets));
+
+ // Last packets of frame 1 and the first packets of frame 2.
+ mock_transport_.AddExpectedSize(kSize1, 5);
+ mock_transport_.AddExpectedSize(kSize2, 2);
+ testing_clock_.Advance(timeout);
+ task_runner_->RunTasks();
+
+ // Last packets of frame 2.
+ mock_transport_.AddExpectedSize(kSize2, 7);
+ testing_clock_.Advance(timeout);
+ task_runner_->RunTasks();
+
+ // No more packets.
+ testing_clock_.Advance(timeout);
+ task_runner_->RunTasks();
+}
+
+TEST_F(PacedSenderTest, PaceWith60fps) {
+ // Testing what happen when we get multiple NACK requests for a fully lost
+ // frames just as we sent the first packets in a frame.
+ int num_of_packets_in_frame = 9;
+
+ PacketList first_frame_packets =
+ CreatePacketList(kSize1, num_of_packets_in_frame);
+
+ PacketList second_frame_packets =
+ CreatePacketList(kSize2, num_of_packets_in_frame);
+
+ PacketList third_frame_packets =
+ CreatePacketList(kSize3, num_of_packets_in_frame);
+
+ PacketList fourth_frame_packets =
+ CreatePacketList(kSize4, num_of_packets_in_frame);
+
+ base::TimeDelta timeout_10ms = base::TimeDelta::FromMilliseconds(10);
+
+ // Check that the first burst of the frame go out on the wire.
+ mock_transport_.AddExpectedSize(kSize1, 3);
+ EXPECT_TRUE(paced_sender_->SendPackets(first_frame_packets));
+
+ mock_transport_.AddExpectedSize(kSize1, 3);
+ testing_clock_.Advance(timeout_10ms);
+ task_runner_->RunTasks();
+
+ testing_clock_.Advance(base::TimeDelta::FromMilliseconds(6));
+
+ // Add second frame, after 16 ms.
+ EXPECT_TRUE(paced_sender_->SendPackets(second_frame_packets));
+ testing_clock_.Advance(base::TimeDelta::FromMilliseconds(4));
+
+ mock_transport_.AddExpectedSize(kSize1, 3);
+ mock_transport_.AddExpectedSize(kSize2, 1);
+ testing_clock_.Advance(timeout_10ms);
+ task_runner_->RunTasks();
+
+ mock_transport_.AddExpectedSize(kSize2, 4);
+ testing_clock_.Advance(timeout_10ms);
+ task_runner_->RunTasks();
+
+ testing_clock_.Advance(base::TimeDelta::FromMilliseconds(3));
+
+ // Add third frame, after 33 ms.
+ EXPECT_TRUE(paced_sender_->SendPackets(third_frame_packets));
+ mock_transport_.AddExpectedSize(kSize2, 4);
+ mock_transport_.AddExpectedSize(kSize3, 1);
+
+ testing_clock_.Advance(base::TimeDelta::FromMilliseconds(7));
+ task_runner_->RunTasks();
+
+ // Add fourth frame, after 50 ms.
+ EXPECT_TRUE(paced_sender_->SendPackets(fourth_frame_packets));
+
+ mock_transport_.AddExpectedSize(kSize3, 6);
+ testing_clock_.Advance(timeout_10ms);
+ task_runner_->RunTasks();
+
+ mock_transport_.AddExpectedSize(kSize3, 2);
+ mock_transport_.AddExpectedSize(kSize4, 4);
+ testing_clock_.Advance(timeout_10ms);
+ task_runner_->RunTasks();
+
+ mock_transport_.AddExpectedSize(kSize4, 5);
+ testing_clock_.Advance(timeout_10ms);
+ task_runner_->RunTasks();
+
+ testing_clock_.Advance(timeout_10ms);
+ task_runner_->RunTasks();
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/rtp_sender/mock_rtp_sender.h b/chromium/media/cast/net/rtp_sender/mock_rtp_sender.h
index 334bc885db5..2c3f19f2ae9 100644
--- a/chromium/media/cast/rtp_sender/mock_rtp_sender.h
+++ b/chromium/media/cast/net/rtp_sender/mock_rtp_sender.h
@@ -7,7 +7,7 @@
#include <vector>
-#include "media/cast/rtp_sender/rtp_sender.h"
+#include "media/cast/net/rtp_sender/rtp_sender.h"
#include "testing/gmock/include/gmock/gmock.h"
namespace media {
@@ -22,7 +22,7 @@ class MockRtpSender : public RtpSender {
bool(const EncodedAudioFrame& frame, int64 recorded_time));
MOCK_METHOD3(ResendPacket,
- bool(bool is_audio, uint8 frame_id, uint16 packet_id));
+ bool(bool is_audio, uint32 frame_id, uint16 packet_id));
MOCK_METHOD0(RtpStatistics, void());
};
diff --git a/chromium/media/cast/rtp_sender/packet_storage/packet_storage.cc b/chromium/media/cast/net/rtp_sender/packet_storage/packet_storage.cc
index 9c2d7ff0884..3bd8f900665 100644
--- a/chromium/media/cast/rtp_sender/packet_storage/packet_storage.cc
+++ b/chromium/media/cast/net/rtp_sender/packet_storage/packet_storage.cc
@@ -2,12 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/rtp_sender/packet_storage/packet_storage.h"
+#include "media/cast/net/rtp_sender/packet_storage/packet_storage.h"
#include <string>
#include "base/logging.h"
-#include "media/cast/cast_config.h"
#include "media/cast/cast_defines.h"
namespace media {
@@ -25,24 +24,23 @@ class StoredPacket {
packet_.reserve(kIpPacketSize);
}
- void Save(const std::vector<uint8>& packet) {
- DCHECK_LT(packet.size(), kIpPacketSize) << "Invalid argument";
+ void Save(const Packet* packet) {
+ DCHECK_LT(packet->size(), kIpPacketSize) << "Invalid argument";
packet_.clear();
- packet_.insert(packet_.begin(), packet.begin(), packet.end());
+ packet_.insert(packet_.begin(), packet->begin(), packet->end());
}
- void GetCopy(std::vector<uint8>* packet) {
- packet->insert(packet->begin(), packet_.begin(), packet_.end());
+ void GetCopy(PacketList* packets) {
+ packets->push_back(Packet(packet_.begin(), packet_.end()));
}
private:
- std::vector<uint8> packet_;
+ Packet packet_;
};
-
-PacketStorage::PacketStorage(int max_time_stored_ms)
- : default_tick_clock_(new base::DefaultTickClock()),
- clock_(default_tick_clock_.get()) {
+PacketStorage::PacketStorage(base::TickClock* clock,
+ int max_time_stored_ms)
+ : clock_(clock) {
max_time_stored_ = base::TimeDelta::FromMilliseconds(max_time_stored_ms);
DCHECK_LE(max_time_stored_ms, kMaxAllowedTimeStoredMs) << "Invalid argument";
}
@@ -98,13 +96,13 @@ void PacketStorage::CleanupOldPackets(base::TimeTicks now) {
}
}
-void PacketStorage::StorePacket(uint8 frame_id,
- uint16 packet_id,
- const std::vector<uint8>& packet) {
+void PacketStorage::StorePacket(uint32 frame_id, uint16 packet_id,
+ const Packet* packet) {
base::TimeTicks now = clock_->NowTicks();
CleanupOldPackets(now);
- uint32 index = (static_cast<uint32>(frame_id) << 16) + packet_id;
+ // Internally we only use the 8 LSB of the frame id.
+ uint32 index = ((0xff & frame_id) << 16) + packet_id;
PacketMapIterator it = stored_packets_.find(index);
if (it != stored_packets_.end()) {
// We have already saved this.
@@ -125,15 +123,50 @@ void PacketStorage::StorePacket(uint8 frame_id,
time_to_packet_map_.insert(std::make_pair(now, index));
}
+PacketList PacketStorage::GetPackets(
+ const MissingFramesAndPacketsMap& missing_frames_and_packets) {
+ PacketList packets_to_resend;
+
+ // Iterate over all frames in the list.
+ for (MissingFramesAndPacketsMap::const_iterator it =
+ missing_frames_and_packets.begin();
+ it != missing_frames_and_packets.end(); ++it) {
+ uint8 frame_id = it->first;
+ const PacketIdSet& packets_set = it->second;
+ bool success = false;
+
+ if (packets_set.empty()) {
+ VLOG(1) << "Missing all packets in frame " << static_cast<int>(frame_id);
+
+ uint16 packet_id = 0;
+ do {
+ // Get packet from storage.
+ success = GetPacket(frame_id, packet_id, &packets_to_resend);
+ ++packet_id;
+ } while (success);
+ } else {
+ // Iterate over all of the packets in the frame.
+ for (PacketIdSet::const_iterator set_it = packets_set.begin();
+ set_it != packets_set.end(); ++set_it) {
+ GetPacket(frame_id, *set_it, &packets_to_resend);
+ }
+ }
+ }
+ return packets_to_resend;
+}
+
bool PacketStorage::GetPacket(uint8 frame_id,
uint16 packet_id,
- std::vector<uint8>* packet) {
+ PacketList* packets) {
+ // Internally we only use the 8 LSB of the frame id.
uint32 index = (static_cast<uint32>(frame_id) << 16) + packet_id;
PacketMapIterator it = stored_packets_.find(index);
if (it == stored_packets_.end()) {
return false;
}
- it->second->GetCopy(packet);
+ it->second->GetCopy(packets);
+ VLOG(1) << "Resend " << static_cast<int>(frame_id)
+ << ":" << packet_id;
return true;
}
diff --git a/chromium/media/cast/rtp_sender/packet_storage/packet_storage.gypi b/chromium/media/cast/net/rtp_sender/packet_storage/packet_storage.gyp
index f691d9e9b69..f691d9e9b69 100644
--- a/chromium/media/cast/rtp_sender/packet_storage/packet_storage.gypi
+++ b/chromium/media/cast/net/rtp_sender/packet_storage/packet_storage.gyp
diff --git a/chromium/media/cast/rtp_sender/packet_storage/packet_storage.h b/chromium/media/cast/net/rtp_sender/packet_storage/packet_storage.h
index e1e3bcbe121..34933ef5f6d 100644
--- a/chromium/media/cast/rtp_sender/packet_storage/packet_storage.h
+++ b/chromium/media/cast/net/rtp_sender/packet_storage/packet_storage.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_CAST_RTP_SENDER_PACKET_STORAGE_INCLUDE_PACKET_STORAGE_H_
-#define MEDIA_CAST_RTP_SENDER_PACKET_STORAGE_INCLUDE_PACKET_STORAGE_H_
+#ifndef MEDIA_CAST_NET_RTP_SENDER_PACKET_STORAGE_PACKET_STORAGE_H_
+#define MEDIA_CAST_NET_RTP_SENDER_PACKET_STORAGE_PACKET_STORAGE_H_
#include <list>
#include <map>
@@ -12,9 +12,9 @@
#include "base/basictypes.h"
#include "base/memory/linked_ptr.h"
#include "base/memory/scoped_ptr.h"
-#include "base/time/default_tick_clock.h"
#include "base/time/tick_clock.h"
#include "base/time/time.h"
+#include "media/cast/cast_config.h"
namespace media {
namespace cast {
@@ -27,33 +27,29 @@ class PacketStorage {
public:
static const int kMaxStoredPackets = 1000;
- explicit PacketStorage(int max_time_stored_ms);
+ PacketStorage(base::TickClock* clock, int max_time_stored_ms);
virtual ~PacketStorage();
- void StorePacket(uint8 frame_id,
- uint16 packet_id,
- const std::vector<uint8>& packet);
+ void StorePacket(uint32 frame_id, uint16 packet_id, const Packet* packet);
- // Copies packet into the buffer pointed to by rtp_buffer.
- bool GetPacket(uint8 frame_id,
- uint16 packet_id,
- std::vector<uint8>* packet);
- void set_clock(base::TickClock* clock) {
- clock_ = clock;
- }
+ // Copies all missing packets into the packet list.
+ PacketList GetPackets(
+ const MissingFramesAndPacketsMap& missing_frames_and_packets);
+
+ // Copies packet into the packet list.
+ bool GetPacket(uint8 frame_id, uint16 packet_id, PacketList* packets);
private:
void CleanupOldPackets(base::TimeTicks now);
+ base::TickClock* const clock_; // Not owned by this class.
base::TimeDelta max_time_stored_;
PacketMap stored_packets_;
TimeToPacketMap time_to_packet_map_;
std::list<linked_ptr<StoredPacket> > free_packets_;
- scoped_ptr<base::TickClock> default_tick_clock_;
- base::TickClock* clock_;
};
} // namespace cast
} // namespace media
-#endif // MEDIA_CAST_RTP_SENDER_PACKET_STORAGE_INCLUDE_PACKET_STORAGE_H_
+#endif // MEDIA_CAST_NET_RTP_SENDER_PACKET_STORAGE_PACKET_STORAGE_H_
diff --git a/chromium/media/cast/rtp_sender/packet_storage/packet_storage_unittest.cc b/chromium/media/cast/net/rtp_sender/packet_storage/packet_storage_unittest.cc
index d6de08d4866..049d3ae29b6 100644
--- a/chromium/media/cast/rtp_sender/packet_storage/packet_storage_unittest.cc
+++ b/chromium/media/cast/net/rtp_sender/packet_storage/packet_storage_unittest.cc
@@ -2,15 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/rtp_sender/packet_storage/packet_storage.h"
-
-#include <gmock/gmock.h>
-#include <gtest/gtest.h>
+#include "media/cast/net/rtp_sender/packet_storage/packet_storage.h"
#include <vector>
#include "base/test/simple_test_tick_clock.h"
#include "base/time/time.h"
+#include "testing/gmock/include/gmock/gmock.h"
namespace media {
namespace cast {
@@ -19,91 +17,89 @@ static const int kMaxDeltaStoredMs = 500;
static const base::TimeDelta kDeltaBetweenFrames =
base::TimeDelta::FromMilliseconds(33);
-static const int64 kStartMillisecond = 123456789;
+static const int64 kStartMillisecond = GG_INT64_C(12345678900000);
class PacketStorageTest : public ::testing::Test {
protected:
- PacketStorageTest() : packet_storage_(kMaxDeltaStoredMs) {
+ PacketStorageTest() : packet_storage_(&testing_clock_, kMaxDeltaStoredMs) {
testing_clock_.Advance(
base::TimeDelta::FromMilliseconds(kStartMillisecond));
- packet_storage_.set_clock(&testing_clock_);
}
- PacketStorage packet_storage_;
base::SimpleTestTickClock testing_clock_;
+ PacketStorage packet_storage_;
};
TEST_F(PacketStorageTest, TimeOut) {
- std::vector<uint8> test_123(100, 123); // 100 insertions of the value 123.
-
- for (uint8 frame_id = 0; frame_id < 30; ++frame_id) {
+ Packet test_123(100, 123); // 100 insertions of the value 123.
+ PacketList packets;
+ for (uint32 frame_id = 0; frame_id < 30; ++frame_id) {
for (uint16 packet_id = 0; packet_id < 10; ++packet_id) {
- packet_storage_.StorePacket(frame_id, packet_id, test_123);
+ packet_storage_.StorePacket(frame_id, packet_id, &test_123);
}
testing_clock_.Advance(kDeltaBetweenFrames);
}
// All packets belonging to the first 14 frames is expected to be expired.
- for (uint8 frame_id = 0; frame_id < 14; ++frame_id) {
+ for (uint32 frame_id = 0; frame_id < 14; ++frame_id) {
for (uint16 packet_id = 0; packet_id < 10; ++packet_id) {
- std::vector<uint8> packet;
- EXPECT_FALSE(packet_storage_.GetPacket(frame_id, packet_id, &packet));
+ Packet packet;
+ EXPECT_FALSE(packet_storage_.GetPacket(frame_id, packet_id, &packets));
}
}
// All packets belonging to the next 15 frames is expected to be valid.
- for (uint8 frame_id = 14; frame_id < 30; ++frame_id) {
+ for (uint32 frame_id = 14; frame_id < 30; ++frame_id) {
for (uint16 packet_id = 0; packet_id < 10; ++packet_id) {
- std::vector<uint8> packet;
- EXPECT_TRUE(packet_storage_.GetPacket(frame_id, packet_id, &packet));
- EXPECT_TRUE(packet == test_123);
+ EXPECT_TRUE(packet_storage_.GetPacket(frame_id, packet_id, &packets));
+ EXPECT_TRUE(packets.front() == test_123);
}
}
}
TEST_F(PacketStorageTest, MaxNumberOfPackets) {
- std::vector<uint8> test_123(100, 123); // 100 insertions of the value 123.
+ Packet test_123(100, 123); // 100 insertions of the value 123.
+ PacketList packets;
- uint8 frame_id = 0;
+ uint32 frame_id = 0;
for (uint16 packet_id = 0; packet_id <= PacketStorage::kMaxStoredPackets;
++packet_id) {
- packet_storage_.StorePacket(frame_id, packet_id, test_123);
+ packet_storage_.StorePacket(frame_id, packet_id, &test_123);
}
- std::vector<uint8> packet;
+ Packet packet;
uint16 packet_id = 0;
- EXPECT_FALSE(packet_storage_.GetPacket(frame_id, packet_id, &packet));
+ EXPECT_FALSE(packet_storage_.GetPacket(frame_id, packet_id, &packets));
++packet_id;
for (; packet_id <= PacketStorage::kMaxStoredPackets; ++packet_id) {
- std::vector<uint8> packet;
- EXPECT_TRUE(packet_storage_.GetPacket(frame_id, packet_id, &packet));
- EXPECT_TRUE(packet == test_123);
+ EXPECT_TRUE(packet_storage_.GetPacket(frame_id, packet_id, &packets));
+ EXPECT_TRUE(packets.back() == test_123);
}
}
TEST_F(PacketStorageTest, PacketContent) {
- std::vector<uint8> test_123(100, 123); // 100 insertions of the value 123.
- std::vector<uint8> test_234(200, 234); // 200 insertions of the value 234.
+ Packet test_123(100, 123); // 100 insertions of the value 123.
+ Packet test_234(200, 234); // 200 insertions of the value 234.
+ PacketList packets;
- for (uint8 frame_id = 0; frame_id < 10; ++frame_id) {
+ for (uint32 frame_id = 0; frame_id < 10; ++frame_id) {
for (uint16 packet_id = 0; packet_id < 10; ++packet_id) {
// Every other packet.
if (packet_id % 2 == 0) {
- packet_storage_.StorePacket(frame_id, packet_id, test_123);
+ packet_storage_.StorePacket(frame_id, packet_id, &test_123);
} else {
- packet_storage_.StorePacket(frame_id, packet_id, test_234);
+ packet_storage_.StorePacket(frame_id, packet_id, &test_234);
}
}
testing_clock_.Advance(kDeltaBetweenFrames);
}
- for (uint8 frame_id = 0; frame_id < 10; ++frame_id) {
+ for (uint32 frame_id = 0; frame_id < 10; ++frame_id) {
for (uint16 packet_id = 0; packet_id < 10; ++packet_id) {
- std::vector<uint8> packet;
- EXPECT_TRUE(packet_storage_.GetPacket(frame_id, packet_id, &packet));
+ EXPECT_TRUE(packet_storage_.GetPacket(frame_id, packet_id, &packets));
// Every other packet.
if (packet_id % 2 == 0) {
- EXPECT_TRUE(packet == test_123);
+ EXPECT_TRUE(packets.back() == test_123);
} else {
- EXPECT_TRUE(packet == test_234);
+ EXPECT_TRUE(packets.back() == test_234);
}
}
}
diff --git a/chromium/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer.cc b/chromium/media/cast/net/rtp_sender/rtp_packetizer/rtp_packetizer.cc
index 6900bc24b38..8a50f8a8aad 100644
--- a/chromium/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer.cc
+++ b/chromium/media/cast/net/rtp_sender/rtp_packetizer/rtp_packetizer.cc
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/rtp_sender/rtp_packetizer/rtp_packetizer.h"
+#include "media/cast/net/rtp_sender/rtp_packetizer/rtp_packetizer.h"
#include "base/logging.h"
#include "media/cast/cast_defines.h"
-#include "media/cast/pacing/paced_sender.h"
+#include "media/cast/net/pacing/paced_sender.h"
#include "net/base/big_endian.h"
namespace media {
@@ -25,7 +25,6 @@ RtpPacketizer::RtpPacketizer(PacedPacketSender* transport,
packet_storage_(packet_storage),
sequence_number_(config_.sequence_number),
rtp_timestamp_(config_.rtp_timestamp),
- frame_id_(0),
packet_id_(0),
send_packets_count_(0),
send_octet_count_(0) {
@@ -40,14 +39,12 @@ void RtpPacketizer::IncomingEncodedVideoFrame(
DCHECK(!config_.audio) << "Invalid state";
if (config_.audio) return;
- base::TimeTicks zero_time;
- base::TimeDelta capture_delta = capture_time - zero_time;
-
// Timestamp is in 90 KHz for video.
- rtp_timestamp_ = static_cast<uint32>(capture_delta.InMilliseconds() * 90);
+ rtp_timestamp_ = GetVideoRtpTimestamp(capture_time);
time_last_sent_rtp_timestamp_ = capture_time;
Cast(video_frame->key_frame,
+ video_frame->frame_id,
video_frame->last_referenced_frame_id,
rtp_timestamp_,
video_frame->data);
@@ -61,7 +58,7 @@ void RtpPacketizer::IncomingEncodedAudioFrame(
rtp_timestamp_ += audio_frame->samples; // Timestamp is in samples for audio.
time_last_sent_rtp_timestamp_ = recorded_time;
- Cast(true, 0, rtp_timestamp_, audio_frame->data);
+ Cast(true, audio_frame->frame_id, 0, rtp_timestamp_, audio_frame->data);
}
uint16 RtpPacketizer::NextSequenceNumber() {
@@ -78,65 +75,74 @@ bool RtpPacketizer::LastSentTimestamp(base::TimeTicks* time_sent,
return true;
}
+// TODO(mikhal): Switch to pass data with a const_ref.
void RtpPacketizer::Cast(bool is_key,
- uint8 reference_frame_id,
+ uint32 frame_id,
+ uint32 reference_frame_id,
uint32 timestamp,
- std::vector<uint8> data) {
+ const std::string& data) {
uint16 rtp_header_length = kCommonRtpHeaderLength + kCastRtpHeaderLength;
uint16 max_length = config_.max_payload_length - rtp_header_length - 1;
+
// Split the payload evenly (round number up).
- uint32 num_packets = (data.size() + max_length) / max_length;
- uint32 payload_length = (data.size() + num_packets) / num_packets;
+ size_t num_packets = (data.size() + max_length) / max_length;
+ size_t payload_length = (data.size() + num_packets) / num_packets;
DCHECK_LE(payload_length, max_length) << "Invalid argument";
- std::vector<uint8> packet;
- packet.reserve(kIpPacketSize);
+ PacketList packets;
+
size_t remaining_size = data.size();
- uint8* data_ptr = data.data();
+ std::string::const_iterator data_iter = data.begin();
while (remaining_size > 0) {
- packet.clear();
+ Packet packet;
+
if (remaining_size < payload_length) {
payload_length = remaining_size;
}
remaining_size -= payload_length;
BuildCommonRTPheader(&packet, remaining_size == 0, timestamp);
+
// Build Cast header.
packet.push_back(
(is_key ? kCastKeyFrameBitMask : 0) | kCastReferenceFrameIdBitMask);
- packet.push_back(frame_id_);
- int start_size = packet.size();
- packet.resize(start_size + 32);
- net::BigEndianWriter big_endian_writer(&((packet)[start_size]), 32);
+ packet.push_back(frame_id);
+ size_t start_size = packet.size();
+ packet.resize(start_size + 4);
+ net::BigEndianWriter big_endian_writer(&(packet[start_size]), 4);
big_endian_writer.WriteU16(packet_id_);
- big_endian_writer.WriteU16(num_packets - 1);
- packet.push_back(reference_frame_id);
+ big_endian_writer.WriteU16(static_cast<uint16>(num_packets - 1));
+ packet.push_back(static_cast<uint8>(reference_frame_id));
// Copy payload data.
- packet.insert(packet.end(), data_ptr, data_ptr + payload_length);
+ packet.insert(packet.end(), data_iter, data_iter + payload_length);
+
// Store packet.
- packet_storage_->StorePacket(frame_id_, packet_id_, packet);
- // Send to network.
- transport_->SendPacket(packet, num_packets);
+ packet_storage_->StorePacket(frame_id, packet_id_, &packet);
++packet_id_;
- data_ptr += payload_length;
+ data_iter += payload_length;
+
// Update stats.
++send_packets_count_;
send_octet_count_ += payload_length;
+ packets.push_back(packet);
}
DCHECK(packet_id_ == num_packets) << "Invalid state";
+
+ // Send to network.
+ transport_->SendPackets(packets);
+
// Prepare for next frame.
packet_id_ = 0;
- frame_id_ = static_cast<uint8>(frame_id_ + 1);
}
void RtpPacketizer::BuildCommonRTPheader(
- std::vector<uint8>* packet, bool marker_bit, uint32 time_stamp) {
+ Packet* packet, bool marker_bit, uint32 time_stamp) {
packet->push_back(0x80);
packet->push_back(static_cast<uint8>(config_.payload_type) |
(marker_bit ? kRtpMarkerBitMask : 0));
- int start_size = packet->size();
- packet->resize(start_size + 80);
- net::BigEndianWriter big_endian_writer(&((*packet)[start_size]), 80);
+ size_t start_size = packet->size();
+ packet->resize(start_size + 10);
+ net::BigEndianWriter big_endian_writer(&((*packet)[start_size]), 10);
big_endian_writer.WriteU16(sequence_number_);
big_endian_writer.WriteU32(time_stamp);
big_endian_writer.WriteU32(config_.ssrc);
diff --git a/chromium/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer.gypi b/chromium/media/cast/net/rtp_sender/rtp_packetizer/rtp_packetizer.gyp
index 09ceb3b6354..d75d8a66911 100644
--- a/chromium/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer.gypi
+++ b/chromium/media/cast/net/rtp_sender/rtp_packetizer/rtp_packetizer.gyp
@@ -15,10 +15,12 @@
'sources': [
'rtp_packetizer.cc',
'rtp_packetizer.h',
+ 'rtp_packetizer_config.cc',
+ 'rtp_packetizer_config.h',
], # source
'dependencies': [
'<(DEPTH)/base/base.gyp:base',
- '<(DEPTH)/net/net.gyp:*',
+ '<(DEPTH)/net/net.gyp:net',
],
},
],
diff --git a/chromium/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer.h b/chromium/media/cast/net/rtp_sender/rtp_packetizer/rtp_packetizer.h
index 63035d098db..9f9be5fe163 100644
--- a/chromium/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer.h
+++ b/chromium/media/cast/net/rtp_sender/rtp_packetizer/rtp_packetizer.h
@@ -2,17 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_CAST_RTP_SENDER_RTP_PACKETIZER_RTP_PACKETIZER_H_
-#define MEDIA_CAST_RTP_SENDER_RTP_PACKETIZER_RTP_PACKETIZER_H_
+#ifndef MEDIA_CAST_NET_RTP_SENDER_RTP_PACKETIZER_RTP_PACKETIZER_H_
+#define MEDIA_CAST_NET_RTP_SENDER_RTP_PACKETIZER_RTP_PACKETIZER_H_
#include <cmath>
#include <list>
#include <map>
#include "base/time/time.h"
-#include "media/cast/rtp_common/rtp_defines.h"
-#include "media/cast/rtp_sender/packet_storage/packet_storage.h"
-#include "media/cast/rtp_sender/rtp_packetizer/rtp_packetizer_config.h"
+#include "media/cast/net/rtp_sender/packet_storage/packet_storage.h"
+#include "media/cast/net/rtp_sender/rtp_packetizer/rtp_packetizer_config.h"
namespace media {
namespace cast {
@@ -45,11 +44,13 @@ class RtpPacketizer {
uint16 NextSequenceNumber();
int send_packets_count() { return send_packets_count_; }
- int send_octet_count() { return send_octet_count_; }
+
+ size_t send_octet_count() { return send_octet_count_; }
private:
- void Cast(bool is_key, uint8 reference_frame_id,
- uint32 timestamp, std::vector<uint8> data);
+ void Cast(bool is_key, uint32 frame_id, uint32 reference_frame_id,
+ uint32 timestamp, const std::string& data);
+
void BuildCommonRTPheader(std::vector<uint8>* packet, bool marker_bit,
uint32 time_stamp);
@@ -60,14 +61,13 @@ class RtpPacketizer {
base::TimeTicks time_last_sent_rtp_timestamp_;
uint16 sequence_number_;
uint32 rtp_timestamp_;
- uint8 frame_id_;
uint16 packet_id_;
int send_packets_count_;
- int send_octet_count_;
+ size_t send_octet_count_;
};
} // namespace cast
} // namespace media
-#endif // MEDIA_CAST_RTP_SENDER_RTP_PACKETIZER_RTP_PACKETIZER_H_
+#endif // MEDIA_CAST_NET_RTP_SENDER_RTP_PACKETIZER_RTP_PACKETIZER_H_
diff --git a/chromium/media/cast/net/rtp_sender/rtp_packetizer/rtp_packetizer_config.cc b/chromium/media/cast/net/rtp_sender/rtp_packetizer/rtp_packetizer_config.cc
new file mode 100644
index 00000000000..5fe3a92b61b
--- /dev/null
+++ b/chromium/media/cast/net/rtp_sender/rtp_packetizer/rtp_packetizer_config.cc
@@ -0,0 +1,21 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/net/rtp_sender/rtp_packetizer/rtp_packetizer_config.h"
+
+namespace media {
+namespace cast {
+
+RtpPacketizerConfig::RtpPacketizerConfig()
+ : ssrc(0),
+ max_payload_length(kIpPacketSize - 28), // Default is IP-v4/UDP.
+ audio(false),
+ frequency(8000),
+ payload_type(-1),
+ sequence_number(0),
+ rtp_timestamp(0) {
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer_config.h b/chromium/media/cast/net/rtp_sender/rtp_packetizer/rtp_packetizer_config.h
index cd005d53c85..1a2549e66b2 100644
--- a/chromium/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer_config.h
+++ b/chromium/media/cast/net/rtp_sender/rtp_packetizer/rtp_packetizer_config.h
@@ -2,25 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef CAST_RTP_SENDER_RTP_PACKETIZER_RTP_PACKETIZER_CONFIG_H_
-#define CAST_RTP_SENDER_RTP_PACKETIZER_RTP_PACKETIZER_CONFIG_H_
+#ifndef CAST_NET_RTP_SENDER_RTP_PACKETIZER_RTP_PACKETIZER_CONFIG_H_
+#define CAST_NET_RTP_SENDER_RTP_PACKETIZER_RTP_PACKETIZER_CONFIG_H_
#include "media/cast/cast_config.h"
-#include "media/cast/rtp_common/rtp_defines.h"
+#include "media/cast/rtp_receiver/rtp_receiver_defines.h"
namespace media {
namespace cast {
struct RtpPacketizerConfig {
- RtpPacketizerConfig() {
- ssrc = 0;
- max_payload_length = kIpPacketSize - 28; // Default is IP-v4/UDP.
- audio = false;
- frequency = 8000;
- payload_type = -1;
- sequence_number = 0;
- rtp_timestamp = 0;
- }
+ RtpPacketizerConfig();
// General.
bool audio;
@@ -44,4 +36,4 @@ struct RtpPacketizerConfig {
} // namespace cast
} // namespace media
-#endif // CAST_RTP_SENDER_RTP_PACKETIZER_RTP_PACKETIZER_CONFIG_H_
+#endif // CAST_NET_RTP_SENDER_RTP_PACKETIZER_RTP_PACKETIZER_CONFIG_H_
diff --git a/chromium/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc b/chromium/media/cast/net/rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc
index bed7cba2e8f..defdecf7584 100644
--- a/chromium/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc
+++ b/chromium/media/cast/net/rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc
@@ -2,16 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/rtp_sender/rtp_packetizer/rtp_packetizer.h"
-
-#include <gtest/gtest.h>
+#include "media/cast/net/rtp_sender/rtp_packetizer/rtp_packetizer.h"
#include "base/memory/scoped_ptr.h"
+#include "base/test/simple_test_tick_clock.h"
#include "media/cast/cast_config.h"
-#include "media/cast/pacing/paced_sender.h"
-#include "media/cast/rtp_common/rtp_defines.h"
-#include "media/cast/rtp_sender/packet_storage/packet_storage.h"
-#include "media/cast/rtp_sender/rtp_packetizer/test/rtp_header_parser.h"
+#include "media/cast/net/pacing/paced_sender.h"
+#include "media/cast/net/rtp_sender/packet_storage/packet_storage.h"
+#include "media/cast/net/rtp_sender/rtp_packetizer/test/rtp_header_parser.h"
+#include "testing/gmock/include/gmock/gmock.h"
namespace media {
namespace cast {
@@ -19,13 +18,9 @@ namespace cast {
static const int kPayload = 127;
static const uint32 kTimestampMs = 10;
static const uint16 kSeqNum = 33;
-static const int kTimeOffset = 22222;
static const int kMaxPacketLength = 1500;
-static const bool kMarkerBit = true;
static const int kSsrc = 0x12345;
-static const uint8 kFrameId = 1;
static const unsigned int kFrameSize = 5000;
-static const int kTotalHeaderLength = 19;
static const int kMaxPacketStorageTimeMs = 300;
class TestRtpPacketTransport : public PacedPacketSender {
@@ -34,41 +29,50 @@ class TestRtpPacketTransport : public PacedPacketSender {
: config_(config),
sequence_number_(kSeqNum),
packets_sent_(0),
- expected_number_of_packets_(0) {}
+ expected_number_of_packets_(0),
+ expected_packet_id_(0),
+ expected_frame_id_(0) {}
- void VerifyRtpHeader(const RtpCastHeader& rtp_header) {
+ void VerifyRtpHeader(const RtpCastTestHeader& rtp_header) {
VerifyCommonRtpHeader(rtp_header);
VerifyCastRtpHeader(rtp_header);
}
- void VerifyCommonRtpHeader(const RtpCastHeader& rtp_header) {
+ void VerifyCommonRtpHeader(const RtpCastTestHeader& rtp_header) {
EXPECT_EQ(expected_number_of_packets_ == packets_sent_,
- rtp_header.webrtc.header.markerBit);
- EXPECT_EQ(kPayload, rtp_header.webrtc.header.payloadType);
- EXPECT_EQ(sequence_number_, rtp_header.webrtc.header.sequenceNumber);
- EXPECT_EQ(kTimestampMs * 90, rtp_header.webrtc.header.timestamp);
- EXPECT_EQ(config_.ssrc, rtp_header.webrtc.header.ssrc);
- EXPECT_EQ(0, rtp_header.webrtc.header.numCSRCs);
+ rtp_header.marker);
+ EXPECT_EQ(kPayload, rtp_header.payload_type);
+ EXPECT_EQ(sequence_number_, rtp_header.sequence_number);
+ EXPECT_EQ(kTimestampMs * 90, rtp_header.rtp_timestamp);
+ EXPECT_EQ(config_.ssrc, rtp_header.ssrc);
+ EXPECT_EQ(0, rtp_header.num_csrcs);
}
- void VerifyCastRtpHeader(const RtpCastHeader& rtp_header) {
- // TODO(mikhal)
+ void VerifyCastRtpHeader(const RtpCastTestHeader& rtp_header) {
+ EXPECT_FALSE(rtp_header.is_key_frame);
+ EXPECT_EQ(expected_frame_id_, rtp_header.frame_id);
+ EXPECT_EQ(expected_packet_id_, rtp_header.packet_id);
+ EXPECT_EQ(expected_number_of_packets_ - 1, rtp_header.max_packet_id);
+ EXPECT_TRUE(rtp_header.is_reference);
+ EXPECT_EQ(expected_frame_id_ - 1u, rtp_header.reference_frame_id);
}
- virtual bool SendPacket(const std::vector<uint8>& packet,
- int num_packets) OVERRIDE {
- EXPECT_EQ(expected_number_of_packets_, num_packets);
- ++packets_sent_;
- RtpHeaderParser parser(packet.data(), packet.size());
- RtpCastHeader rtp_header;
- parser.Parse(&rtp_header);
- VerifyRtpHeader(rtp_header);
- ++sequence_number_;
+ virtual bool SendPackets(const PacketList& packets) OVERRIDE {
+ EXPECT_EQ(expected_number_of_packets_, static_cast<int>(packets.size()));
+ PacketList::const_iterator it = packets.begin();
+ for (; it != packets.end(); ++it) {
+ ++packets_sent_;
+ RtpHeaderParser parser(it->data(), it->size());
+ RtpCastTestHeader rtp_header;
+ parser.Parse(&rtp_header);
+ VerifyRtpHeader(rtp_header);
+ ++sequence_number_;
+ ++expected_packet_id_;
+ }
return true;
}
- virtual bool ResendPacket(const std::vector<uint8>& packet,
- int num_of_packets) OVERRIDE {
+ virtual bool ResendPackets(const PacketList& packets) OVERRIDE {
EXPECT_TRUE(false);
return false;
}
@@ -86,13 +90,16 @@ class TestRtpPacketTransport : public PacedPacketSender {
uint32 sequence_number_;
int packets_sent_;
int expected_number_of_packets_;
+ // Assuming packets arrive in sequence.
+ int expected_packet_id_;
+ uint32 expected_frame_id_;
};
class RtpPacketizerTest : public ::testing::Test {
protected:
RtpPacketizerTest()
:video_frame_(),
- packet_storage_(kMaxPacketStorageTimeMs) {
+ packet_storage_(&testing_clock_, kMaxPacketStorageTimeMs) {
config_.sequence_number = kSeqNum;
config_.ssrc = kSsrc;
config_.payload_type = kPayload;
@@ -102,15 +109,16 @@ class RtpPacketizerTest : public ::testing::Test {
new RtpPacketizer(transport_.get(), &packet_storage_, config_));
}
- ~RtpPacketizerTest() {}
+ virtual ~RtpPacketizerTest() {}
- void SetUp() {
+ virtual void SetUp() {
video_frame_.key_frame = false;
- video_frame_.frame_id = kFrameId;
- video_frame_.last_referenced_frame_id = kFrameId - 1;
+ video_frame_.frame_id = 0;
+ video_frame_.last_referenced_frame_id = kStartFrameId;
video_frame_.data.assign(kFrameSize, 123);
}
+ base::SimpleTestTickClock testing_clock_;
scoped_ptr<RtpPacketizer> rtp_packetizer_;
RtpPacketizerConfig config_;
scoped_ptr<TestRtpPacketTransport> transport_;
@@ -124,19 +132,19 @@ TEST_F(RtpPacketizerTest, SendStandardPackets) {
base::TimeTicks time;
time += base::TimeDelta::FromMilliseconds(kTimestampMs);
- rtp_packetizer_->IncomingEncodedVideoFrame(&video_frame_,time);
+ rtp_packetizer_->IncomingEncodedVideoFrame(&video_frame_, time);
}
TEST_F(RtpPacketizerTest, Stats) {
EXPECT_FALSE(rtp_packetizer_->send_packets_count());
EXPECT_FALSE(rtp_packetizer_->send_octet_count());
// Insert packets at varying lengths.
- unsigned int expected_num_of_packets = kFrameSize / kMaxPacketLength + 1;
+ int expected_num_of_packets = kFrameSize / kMaxPacketLength + 1;
transport_->SetExpectedNumberOfPackets(expected_num_of_packets);
- base::TimeTicks time;
- time += base::TimeDelta::FromMilliseconds(kTimestampMs);
- rtp_packetizer_->IncomingEncodedVideoFrame(&video_frame_, time);
+ testing_clock_.Advance(base::TimeDelta::FromMilliseconds(kTimestampMs));
+ rtp_packetizer_->IncomingEncodedVideoFrame(&video_frame_,
+ testing_clock_.NowTicks());
EXPECT_EQ(expected_num_of_packets, rtp_packetizer_->send_packets_count());
EXPECT_EQ(kFrameSize, rtp_packetizer_->send_octet_count());
}
diff --git a/chromium/media/cast/rtp_sender/rtp_sender.cc b/chromium/media/cast/net/rtp_sender/rtp_sender.cc
index ecaae40dd7a..2b017bc1784 100644
--- a/chromium/media/cast/rtp_sender/rtp_sender.cc
+++ b/chromium/media/cast/net/rtp_sender/rtp_sender.cc
@@ -2,35 +2,38 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/rtp_sender/rtp_sender.h"
+#include "media/cast/net/rtp_sender/rtp_sender.h"
#include "base/logging.h"
#include "base/rand_util.h"
#include "media/cast/cast_defines.h"
-#include "media/cast/pacing/paced_sender.h"
+#include "media/cast/net/pacing/paced_sender.h"
#include "media/cast/rtcp/rtcp_defines.h"
+#include "net/base/big_endian.h"
namespace media {
namespace cast {
-RtpSender::RtpSender(const AudioSenderConfig* audio_config,
+RtpSender::RtpSender(scoped_refptr<CastEnvironment> cast_environment,
+ const AudioSenderConfig* audio_config,
const VideoSenderConfig* video_config,
PacedPacketSender* transport)
- : config_(),
- transport_(transport),
- default_tick_clock_(new base::DefaultTickClock()),
- clock_(default_tick_clock_.get()) {
+ : cast_environment_(cast_environment),
+ config_(),
+ transport_(transport) {
// Store generic cast config and create packetizer config.
DCHECK(audio_config || video_config) << "Invalid argument";
if (audio_config) {
- storage_.reset(new PacketStorage(audio_config->rtp_history_ms));
+ storage_.reset(new PacketStorage(cast_environment->Clock(),
+ audio_config->rtp_history_ms));
config_.audio = true;
config_.ssrc = audio_config->sender_ssrc;
config_.payload_type = audio_config->rtp_payload_type;
config_.frequency = audio_config->frequency;
config_.audio_codec = audio_config->codec;
} else {
- storage_.reset(new PacketStorage(video_config->rtp_history_ms));
+ storage_.reset(new PacketStorage(cast_environment->Clock(),
+ video_config->rtp_history_ms));
config_.audio = false;
config_.ssrc = video_config->sender_ssrc;
config_.payload_type = video_config->rtp_payload_type;
@@ -57,61 +60,56 @@ void RtpSender::IncomingEncodedAudioFrame(const EncodedAudioFrame* audio_frame,
}
void RtpSender::ResendPackets(
- const MissingFramesAndPackets& missing_frames_and_packets) {
- std::vector<uint8> packet;
+ const MissingFramesAndPacketsMap& missing_frames_and_packets) {
// Iterate over all frames in the list.
- for (std::map<uint8, std::set<uint16> >::const_iterator it =
+ for (MissingFramesAndPacketsMap::const_iterator it =
missing_frames_and_packets.begin();
it != missing_frames_and_packets.end(); ++it) {
+ PacketList packets_to_resend;
uint8 frame_id = it->first;
- // Iterate over all of the packets in the frame.
- const std::set<uint16>& packets = it->second;
- if (packets.empty()) {
+ const PacketIdSet& packets_set = it->second;
+ bool success = false;
+
+ if (packets_set.empty()) {
VLOG(1) << "Missing all packets in frame " << static_cast<int>(frame_id);
- bool success = false;
uint16 packet_id = 0;
do {
// Get packet from storage.
- packet.clear();
- success = storage_->GetPacket(frame_id, packet_id, &packet);
+ success = storage_->GetPacket(frame_id, packet_id, &packets_to_resend);
// Resend packet to the network.
if (success) {
- VLOG(1) << "Resend " << static_cast<int>(frame_id) << ":"
- << packet_id << " size: " << packets.size();
+ VLOG(1) << "Resend " << static_cast<int>(frame_id)
+ << ":" << packet_id;
// Set a unique incremental sequence number for every packet.
+ Packet& packet = packets_to_resend.back();
UpdateSequenceNumber(&packet);
// Set the size as correspond to each frame.
- transport_->ResendPacket(packet, packets.size());
++packet_id;
}
} while (success);
-
} else {
- for (std::set<uint16>::const_iterator set_it = packets.begin();
- set_it != packets.end(); ++set_it) {
+ // Iterate over all of the packets in the frame.
+ for (PacketIdSet::const_iterator set_it = packets_set.begin();
+ set_it != packets_set.end(); ++set_it) {
uint16 packet_id = *set_it;
- // Get packet from storage.
- packet.clear();
- bool success = storage_->GetPacket(frame_id, packet_id, &packet);
+ success = storage_->GetPacket(frame_id, packet_id, &packets_to_resend);
+
// Resend packet to the network.
if (success) {
- VLOG(1) << "Resend " << static_cast<int>(frame_id) << ":"
- << packet_id << " size: " << packet.size();
+ VLOG(1) << "Resend " << static_cast<int>(frame_id)
+ << ":" << packet_id;
+ Packet& packet = packets_to_resend.back();
UpdateSequenceNumber(&packet);
- // Set the size as correspond to each frame.
- transport_->ResendPacket(packet, packets.size());
- } else {
- VLOG(1) << "Failed to resend " << static_cast<int>(frame_id) << ":"
- << packet_id;
}
}
}
+ transport_->ResendPackets(packets_to_resend);
}
}
-void RtpSender::UpdateSequenceNumber(std::vector<uint8>* packet) {
+void RtpSender::UpdateSequenceNumber(Packet* packet) {
uint16 new_sequence_number = packetizer_->NextSequenceNumber();
int index = 2;
(*packet)[index] = (static_cast<uint8>(new_sequence_number));
@@ -126,8 +124,8 @@ void RtpSender::RtpStatistics(const base::TimeTicks& now,
// was captured.
uint32 ntp_seconds = 0;
uint32 ntp_fraction = 0;
- ConvertTimeToNtp(now, &ntp_seconds, &ntp_fraction);
- // sender_info->ntp_seconds = ntp_seconds;
+ ConvertTimeTicksToNtp(now, &ntp_seconds, &ntp_fraction);
+ sender_info->ntp_seconds = ntp_seconds;
sender_info->ntp_fraction = ntp_fraction;
base::TimeTicks time_sent;
diff --git a/chromium/media/cast/rtp_sender/rtp_sender.gyp b/chromium/media/cast/net/rtp_sender/rtp_sender.gyp
index 77722c9d381..f689b99b149 100644
--- a/chromium/media/cast/rtp_sender/rtp_sender.gyp
+++ b/chromium/media/cast/net/rtp_sender/rtp_sender.gyp
@@ -18,9 +18,8 @@
], # source
'dependencies': [
'<(DEPTH)/base/base.gyp:base',
- '<(DEPTH)/base/base.gyp:test_support_base',
- 'packet_storage/packet_storage.gypi:*',
- 'rtp_packetizer/rtp_packetizer.gypi:*',
+ 'packet_storage/packet_storage.gyp:*',
+ 'rtp_packetizer/rtp_packetizer.gyp:*',
],
},
],
diff --git a/chromium/media/cast/rtp_sender/rtp_sender.h b/chromium/media/cast/net/rtp_sender/rtp_sender.h
index f6e59acba84..038165992db 100644
--- a/chromium/media/cast/rtp_sender/rtp_sender.h
+++ b/chromium/media/cast/net/rtp_sender/rtp_sender.h
@@ -4,20 +4,20 @@
// This file contains the interface to the cast RTP sender.
-#ifndef MEDIA_CAST_RTP_SENDER_RTP_SENDER_H_
-#define MEDIA_CAST_RTP_SENDER_RTP_SENDER_H_
+#ifndef MEDIA_CAST_NET_RTP_SENDER_RTP_SENDER_H_
+#define MEDIA_CAST_NET_RTP_SENDER_RTP_SENDER_H_
#include <map>
#include <set>
#include "base/memory/scoped_ptr.h"
-#include "base/time/default_tick_clock.h"
#include "base/time/tick_clock.h"
#include "base/time/time.h"
#include "media/cast/cast_config.h"
-#include "media/cast/rtp_sender/packet_storage/packet_storage.h"
-#include "media/cast/rtp_sender/rtp_packetizer/rtp_packetizer.h"
-#include "media/cast/rtp_sender/rtp_packetizer/rtp_packetizer_config.h"
+#include "media/cast/cast_environment.h"
+#include "media/cast/net/rtp_sender/packet_storage/packet_storage.h"
+#include "media/cast/net/rtp_sender/rtp_packetizer/rtp_packetizer.h"
+#include "media/cast/net/rtp_sender/rtp_packetizer/rtp_packetizer_config.h"
namespace media {
namespace cast {
@@ -25,15 +25,14 @@ namespace cast {
class PacedPacketSender;
struct RtcpSenderInfo;
-typedef std::map<uint8, std::set<uint16> > MissingFramesAndPackets;
-
// This object is only called from the main cast thread.
// This class handles splitting encoded audio and video frames into packets and
// add an RTP header to each packet. The sent packets are stored until they are
// acknowledged by the remote peer or timed out.
class RtpSender {
public:
- RtpSender(const AudioSenderConfig* audio_config,
+ RtpSender(scoped_refptr<CastEnvironment> cast_environment,
+ const AudioSenderConfig* audio_config,
const VideoSenderConfig* video_config,
PacedPacketSender* transport);
@@ -47,28 +46,21 @@ class RtpSender {
void IncomingEncodedAudioFrame(const EncodedAudioFrame* audio_frame,
const base::TimeTicks& recorded_time);
- void ResendPackets(const MissingFramesAndPackets& missing_packets);
+ void ResendPackets(const MissingFramesAndPacketsMap& missing_packets);
void RtpStatistics(const base::TimeTicks& now, RtcpSenderInfo* sender_info);
- // Used for testing.
- void set_clock(base::TickClock* clock) {
- // TODO(pwestin): review how we pass in a clock for testing.
- clock_ = clock;
- }
-
private:
void UpdateSequenceNumber(std::vector<uint8>* packet);
+ scoped_refptr<CastEnvironment> cast_environment_;
RtpPacketizerConfig config_;
scoped_ptr<RtpPacketizer> packetizer_;
scoped_ptr<PacketStorage> storage_;
PacedPacketSender* transport_;
- scoped_ptr<base::TickClock> default_tick_clock_;
- base::TickClock* clock_;
};
} // namespace cast
} // namespace media
-#endif // MEDIA_CAST_RTP_SENDER_RTP_SENDER_H_
+#endif // MEDIA_CAST_NET_RTP_SENDER_RTP_SENDER_H_
diff --git a/chromium/media/cast/pacing/mock_paced_packet_sender.h b/chromium/media/cast/pacing/mock_paced_packet_sender.h
deleted file mode 100644
index 40d3e622027..00000000000
--- a/chromium/media/cast/pacing/mock_paced_packet_sender.h
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_CAST_PACING_MOCK_PACED_PACKET_SENDER_H_
-#define MEDIA_CAST_PACING_MOCK_PACED_PACKET_SENDER_H_
-
-#include "media/cast/pacing/paced_sender.h"
-#include "testing/gmock/include/gmock/gmock.h"
-
-namespace media {
-namespace cast {
-
-class MockPacedPacketSender : public PacedPacketSender {
- public:
- MOCK_METHOD2(SendPacket,
- bool(const std::vector<uint8>& packet, int num_of_packets));
- MOCK_METHOD2(ResendPacket,
- bool(const std::vector<uint8>& packet, int num_of_packets));
- MOCK_METHOD1(SendRtcpPacket, bool(const std::vector<uint8>& packet));
-};
-
-} // namespace cast
-} // namespace media
-
-#endif // MEDIA_CAST_PACING_MOCK_PACED_PACKET_SENDER_H_
diff --git a/chromium/media/cast/pacing/mock_packet_sender.h b/chromium/media/cast/pacing/mock_packet_sender.h
deleted file mode 100644
index bad9bac89d1..00000000000
--- a/chromium/media/cast/pacing/mock_packet_sender.h
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_CAST_PACING_MOCK_PACKET_SENDER_H_
-#define MEDIA_CAST_PACING_MOCK_PACKET_SENDER_H_
-
-#include "media/cast/cast_config.h"
-#include "testing/gmock/include/gmock/gmock.h"
-
-namespace media {
-namespace cast {
-
-class MockPacketSender : public PacketSender {
- public:
- MOCK_METHOD2(SendPacket, bool(const uint8* packet, int length));
-};
-
-} // namespace cast
-} // namespace media
-
-#endif // MEDIA_CAST_PACING_MOCK_PACKET_SENDER_H_
diff --git a/chromium/media/cast/pacing/paced_sender.cc b/chromium/media/cast/pacing/paced_sender.cc
deleted file mode 100644
index d2935f3e65a..00000000000
--- a/chromium/media/cast/pacing/paced_sender.cc
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/cast/pacing/paced_sender.h"
-
-#include "base/bind.h"
-#include "base/logging.h"
-#include "base/message_loop/message_loop.h"
-
-namespace media {
-namespace cast {
-
-static const int64 kPacingIntervalMs = 10;
-static const int kPacingMaxBurstsPerFrame = 3;
-
-PacedSender::PacedSender(scoped_refptr<CastThread> cast_thread,
- PacketSender* transport)
- : cast_thread_(cast_thread),
- burst_size_(1),
- packets_sent_in_burst_(0),
- transport_(transport),
- clock_(&default_tick_clock_),
- weak_factory_(this) {
- ScheduleNextSend();
-}
-
-PacedSender::~PacedSender() {}
-
-bool PacedSender::SendPacket(const std::vector<uint8>& packet,
- int num_of_packets_in_frame) {
- if (!packet_list_.empty()) {
- // We have a queue put the new packets last in the list.
- packet_list_.push_back(packet);
- UpdateBurstSize(num_of_packets_in_frame);
- return true;
- }
- UpdateBurstSize(num_of_packets_in_frame);
-
- if (packets_sent_in_burst_ >= burst_size_) {
- packet_list_.push_back(packet);
- return true;
- }
- ++packets_sent_in_burst_;
- return transport_->SendPacket(&(packet[0]), packet.size());
-}
-
-bool PacedSender::ResendPacket(const std::vector<uint8>& packet,
- int num_of_packets_to_resend) {
- if (!packet_list_.empty() || !resend_packet_list_.empty()) {
- // We have a queue put the resend packets in the list.
- resend_packet_list_.push_back(packet);
- UpdateBurstSize(num_of_packets_to_resend);
- return true;
- }
- UpdateBurstSize(num_of_packets_to_resend);
-
- if (packets_sent_in_burst_ >= burst_size_) {
- resend_packet_list_.push_back(packet);
- return true;
- }
- ++packets_sent_in_burst_;
- return transport_->SendPacket(&(packet[0]), packet.size());
-}
-
-bool PacedSender::SendRtcpPacket(const std::vector<uint8>& packet) {
- // We pass the RTCP packets straight through.
- return transport_->SendPacket(&(packet[0]), packet.size());
-}
-
-void PacedSender::ScheduleNextSend() {
- base::TimeDelta time_to_next = time_last_process_ - clock_->NowTicks() +
- base::TimeDelta::FromMilliseconds(kPacingIntervalMs);
-
- time_to_next = std::max(time_to_next,
- base::TimeDelta::FromMilliseconds(0));
-
- cast_thread_->PostDelayedTask(CastThread::MAIN, FROM_HERE,
- base::Bind(&PacedSender::SendNextPacketBurst, weak_factory_.GetWeakPtr()),
- time_to_next);
-}
-
-void PacedSender::SendNextPacketBurst() {
- int packets_to_send = burst_size_;
- time_last_process_ = clock_->NowTicks();
- for (int i = 0; i < packets_to_send; ++i) {
- SendStoredPacket();
- }
- ScheduleNextSend();
-}
-
-void PacedSender::SendStoredPacket() {
- if (packet_list_.empty() && resend_packet_list_.empty()) return;
-
- if (!resend_packet_list_.empty()) {
- // Send our re-send packets first.
- const std::vector<uint8>& packet = resend_packet_list_.front();
- transport_->SendPacket(&(packet[0]), packet.size());
- resend_packet_list_.pop_front();
- } else {
- const std::vector<uint8>& packet = packet_list_.front();
- transport_->SendPacket(&(packet[0]), packet.size());
- packet_list_.pop_front();
-
- if (packet_list_.empty()) {
- burst_size_ = 1; // Reset burst size after we sent the last stored packet
- packets_sent_in_burst_ = 0;
- }
- }
-}
-
-void PacedSender::UpdateBurstSize(int packets_to_send) {
- packets_to_send = std::max(packets_to_send,
- static_cast<int>(resend_packet_list_.size() + packet_list_.size()));
-
- packets_to_send += (kPacingMaxBurstsPerFrame - 1); // Round up.
-
- burst_size_ = std::max(packets_to_send / kPacingMaxBurstsPerFrame,
- burst_size_);
-}
-
-} // namespace cast
-} // namespace media
diff --git a/chromium/media/cast/pacing/paced_sender_unittest.cc b/chromium/media/cast/pacing/paced_sender_unittest.cc
deleted file mode 100644
index b823b16fa7e..00000000000
--- a/chromium/media/cast/pacing/paced_sender_unittest.cc
+++ /dev/null
@@ -1,265 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/test/simple_test_tick_clock.h"
-#include "media/cast/pacing/mock_packet_sender.h"
-#include "media/cast/pacing/paced_sender.h"
-#include "media/cast/test/fake_task_runner.h"
-#include "testing/gmock/include/gmock/gmock.h"
-
-namespace media {
-namespace cast {
-
-using testing::_;
-
-static const uint8 kValue = 123;
-static const size_t kSize1 = 100;
-static const size_t kSize2 = 101;
-static const size_t kSize3 = 102;
-static const size_t kSize4 = 103;
-static const size_t kNackSize = 104;
-static const int64 kStartMillisecond = 123456789;
-
-class PacedSenderTest : public ::testing::Test {
- protected:
- PacedSenderTest() {
- testing_clock_.Advance(
- base::TimeDelta::FromMilliseconds(kStartMillisecond));
- }
-
- virtual ~PacedSenderTest() {}
-
- virtual void SetUp() {
- task_runner_ = new test::FakeTaskRunner(&testing_clock_);
- cast_thread_ = new CastThread(task_runner_, task_runner_, task_runner_,
- task_runner_, task_runner_);
- paced_sender_.reset(new PacedSender(cast_thread_, &mock_transport_));
- paced_sender_->set_clock(&testing_clock_);
- }
-
- base::SimpleTestTickClock testing_clock_;
- MockPacketSender mock_transport_;
- scoped_refptr<test::FakeTaskRunner> task_runner_;
- scoped_ptr<PacedSender> paced_sender_;
- scoped_refptr<CastThread> cast_thread_;
-};
-
-TEST_F(PacedSenderTest, PassThroughRtcp) {
- EXPECT_CALL(mock_transport_, SendPacket(_, kSize1)).Times(1).WillRepeatedly(
- testing::Return(true));
-
- std::vector<uint8> packet(kSize1, kValue);
- int num_of_packets = 1;
- EXPECT_TRUE(paced_sender_->SendPacket(packet, num_of_packets));
-
- EXPECT_CALL(mock_transport_, SendPacket(_, kSize1)).Times(0);
- EXPECT_TRUE(paced_sender_->ResendPacket(packet, num_of_packets));
-
- EXPECT_CALL(mock_transport_, SendPacket(_, kSize1)).Times(1).WillRepeatedly(
- testing::Return(true));
- EXPECT_TRUE(paced_sender_->SendRtcpPacket(packet));
-}
-
-TEST_F(PacedSenderTest, BasicPace) {
- std::vector<uint8> packet(kSize1, kValue);
- int num_of_packets = 9;
-
- EXPECT_CALL(mock_transport_,
- SendPacket(_, kSize1)).Times(3).WillRepeatedly(testing::Return(true));
- for (int i = 0; i < num_of_packets; ++i) {
- EXPECT_TRUE(paced_sender_->SendPacket(packet, num_of_packets));
- }
- base::TimeDelta timeout = base::TimeDelta::FromMilliseconds(10);
-
- // Check that we get the next burst.
- EXPECT_CALL(mock_transport_,
- SendPacket(_, kSize1)).Times(3).WillRepeatedly(testing::Return(true));
-
- testing_clock_.Advance(timeout);
- task_runner_->RunTasks();
-
- // If we call process too early make sure we don't send any packets.
- timeout = base::TimeDelta::FromMilliseconds(5);
- EXPECT_CALL(mock_transport_, SendPacket(_, kSize1)).Times(0);
- testing_clock_.Advance(timeout);
- task_runner_->RunTasks();
-
- // Check that we get the next burst.
- EXPECT_CALL(mock_transport_, SendPacket(_, kSize1)).Times(3).WillRepeatedly(
- testing::Return(true));
- testing_clock_.Advance(timeout);
- task_runner_->RunTasks();
-
- // Check that we don't get any more packets.
- EXPECT_CALL(mock_transport_, SendPacket(_, kSize1)).Times(0);
- timeout = base::TimeDelta::FromMilliseconds(10);
- testing_clock_.Advance(timeout);
- task_runner_->RunTasks();
-}
-
-TEST_F(PacedSenderTest, PaceWithNack) {
- // Testing what happen when we get multiple NACK requests for a fully lost
- // frames just as we sent the first packets in a frame.
- std::vector<uint8> firts_packet(kSize1, kValue);
- std::vector<uint8> second_packet(kSize2, kValue);
- std::vector<uint8> nack_packet(kNackSize, kValue);
- int num_of_packets_in_frame = 9;
- int num_of_packets_in_nack = 9;
-
- // Check that the first burst of the frame go out on the wire.
- EXPECT_CALL(mock_transport_, SendPacket(_, kSize1)).Times(3).WillRepeatedly(
- testing::Return(true));
- for (int i = 0; i < num_of_packets_in_frame; ++i) {
- EXPECT_TRUE(paced_sender_->SendPacket(firts_packet,
- num_of_packets_in_frame));
- }
- // Add first NACK request.
- for (int i = 0; i < num_of_packets_in_nack; ++i) {
- EXPECT_TRUE(paced_sender_->ResendPacket(nack_packet,
- num_of_packets_in_nack));
- }
- // Check that we get the first NACK burst.
- EXPECT_CALL(mock_transport_, SendPacket(_, kNackSize)).Times(5).
- WillRepeatedly(testing::Return(true));
-
- base::TimeDelta timeout = base::TimeDelta::FromMilliseconds(10);
- testing_clock_.Advance(timeout);
- task_runner_->RunTasks();
-
- // Add second NACK request.
- for (int i = 0; i < num_of_packets_in_nack; ++i) {
- EXPECT_TRUE(paced_sender_->ResendPacket(nack_packet,
- num_of_packets_in_nack));
- }
-
- // Check that we get the next NACK burst.
- EXPECT_CALL(mock_transport_, SendPacket(_, kNackSize)).Times(7)
- .WillRepeatedly(testing::Return(true));
- testing_clock_.Advance(timeout);
- task_runner_->RunTasks();
-
- // End of NACK plus a packet from the oldest frame.
- EXPECT_CALL(mock_transport_, SendPacket(_, kNackSize)).Times(6)
- .WillRepeatedly(testing::Return(true));
- EXPECT_CALL(mock_transport_, SendPacket(_, kSize1)).Times(1)
- .WillRepeatedly(testing::Return(true));
- testing_clock_.Advance(timeout);
- task_runner_->RunTasks();
-
- // Add second frame.
- // Make sure we don't delay the second frame due to the previous packets.
- for (int i = 0; i < num_of_packets_in_frame; ++i) {
- EXPECT_TRUE(paced_sender_->SendPacket(second_packet,
- num_of_packets_in_frame));
- }
-
- // Last packets of frame 1 and the first packets of frame 2.
- EXPECT_CALL(mock_transport_, SendPacket(_, kSize1)).Times(5).WillRepeatedly(
- testing::Return(true));
- EXPECT_CALL(mock_transport_, SendPacket(_, kSize2)).Times(2).WillRepeatedly(
- testing::Return(true));
- testing_clock_.Advance(timeout);
- task_runner_->RunTasks();
-
- // Last packets of frame 2.
- EXPECT_CALL(mock_transport_, SendPacket(_, kSize2)).Times(7).WillRepeatedly(
- testing::Return(true));
- testing_clock_.Advance(timeout);
- task_runner_->RunTasks();
-
- // No more packets.
- EXPECT_CALL(mock_transport_, SendPacket(_, kSize2)).Times(0);
- testing_clock_.Advance(timeout);
- task_runner_->RunTasks();
-}
-
-TEST_F(PacedSenderTest, PaceWith60fps) {
- // Testing what happen when we get multiple NACK requests for a fully lost
- // frames just as we sent the first packets in a frame.
- std::vector<uint8> firts_packet(kSize1, kValue);
- std::vector<uint8> second_packet(kSize2, kValue);
- std::vector<uint8> third_packet(kSize3, kValue);
- std::vector<uint8> fourth_packet(kSize4, kValue);
- base::TimeDelta timeout_10ms = base::TimeDelta::FromMilliseconds(10);
- int num_of_packets_in_frame = 9;
-
- // Check that the first burst of the frame go out on the wire.
- EXPECT_CALL(mock_transport_, SendPacket(_, kSize1)).Times(3).WillRepeatedly(
- testing::Return(true));
- for (int i = 0; i < num_of_packets_in_frame; ++i) {
- EXPECT_TRUE(paced_sender_->SendPacket(firts_packet,
- num_of_packets_in_frame));
- }
-
- EXPECT_CALL(mock_transport_, SendPacket(_, kSize1)).Times(3).
- WillRepeatedly(testing::Return(true));
- testing_clock_.Advance(timeout_10ms);
- task_runner_->RunTasks();
-
- testing_clock_.Advance(base::TimeDelta::FromMilliseconds(6));
-
- // Add second frame, after 16 ms.
- for (int i = 0; i < num_of_packets_in_frame; ++i) {
- EXPECT_TRUE(paced_sender_->SendPacket(second_packet,
- num_of_packets_in_frame));
- }
- testing_clock_.Advance(base::TimeDelta::FromMilliseconds(4));
-
- EXPECT_CALL(mock_transport_, SendPacket(_, kSize1)).Times(3)
- .WillRepeatedly(testing::Return(true));
- EXPECT_CALL(mock_transport_, SendPacket(_, kSize2)).Times(1)
- .WillRepeatedly(testing::Return(true));
- testing_clock_.Advance(timeout_10ms);
- task_runner_->RunTasks();
-
- EXPECT_CALL(mock_transport_, SendPacket(_, kSize2)).Times(4)
- .WillRepeatedly(testing::Return(true));
- testing_clock_.Advance(timeout_10ms);
- task_runner_->RunTasks();
-
- testing_clock_.Advance(base::TimeDelta::FromMilliseconds(3));
-
- // Add third frame, after 33 ms.
- for (int i = 0; i < num_of_packets_in_frame; ++i) {
- EXPECT_TRUE(paced_sender_->SendPacket(third_packet,
- num_of_packets_in_frame));
- }
- EXPECT_CALL(mock_transport_, SendPacket(_, kSize2)).Times(4)
- .WillRepeatedly(testing::Return(true));
- EXPECT_CALL(mock_transport_, SendPacket(_, kSize3)).Times(1)
- .WillRepeatedly(testing::Return(true));
-
- testing_clock_.Advance(base::TimeDelta::FromMilliseconds(7));
- task_runner_->RunTasks();
-
- // Add fourth frame, after 50 ms.
- for (int i = 0; i < num_of_packets_in_frame; ++i) {
- EXPECT_TRUE(paced_sender_->SendPacket(fourth_packet,
- num_of_packets_in_frame));
- }
-
- EXPECT_CALL(mock_transport_, SendPacket(_, kSize3)).Times(6)
- .WillRepeatedly(testing::Return(true));
- testing_clock_.Advance(timeout_10ms);
- task_runner_->RunTasks();
-
- EXPECT_CALL(mock_transport_, SendPacket(_, kSize3)).Times(2)
- .WillRepeatedly(testing::Return(true));
- EXPECT_CALL(mock_transport_, SendPacket(_, kSize4)).Times(4)
- .WillRepeatedly(testing::Return(true));
- testing_clock_.Advance(timeout_10ms);
- task_runner_->RunTasks();
-
- EXPECT_CALL(mock_transport_, SendPacket(_, kSize4)).Times(5)
- .WillRepeatedly(testing::Return(true));
- testing_clock_.Advance(timeout_10ms);
- task_runner_->RunTasks();
-
- EXPECT_CALL(mock_transport_, SendPacket(_, kSize4)).Times(0);
- testing_clock_.Advance(timeout_10ms);
- task_runner_->RunTasks();
-}
-
-} // namespace cast
-} // namespace media
diff --git a/chromium/media/cast/rtcp/mock_rtcp_receiver_feedback.cc b/chromium/media/cast/rtcp/mock_rtcp_receiver_feedback.cc
new file mode 100644
index 00000000000..daaa1ad0883
--- /dev/null
+++ b/chromium/media/cast/rtcp/mock_rtcp_receiver_feedback.cc
@@ -0,0 +1,23 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/rtcp/mock_rtcp_receiver_feedback.h"
+
+namespace media {
+namespace cast {
+
+MockRtcpReceiverFeedback::MockRtcpReceiverFeedback() {
+}
+
+MockRtcpReceiverFeedback::~MockRtcpReceiverFeedback() {
+}
+
+MockRtcpRttFeedback::MockRtcpRttFeedback() {
+}
+
+MockRtcpRttFeedback::~MockRtcpRttFeedback() {
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/rtcp/mock_rtcp_receiver_feedback.h b/chromium/media/cast/rtcp/mock_rtcp_receiver_feedback.h
index 09e2cb4faf5..0316d9819f2 100644
--- a/chromium/media/cast/rtcp/mock_rtcp_receiver_feedback.h
+++ b/chromium/media/cast/rtcp/mock_rtcp_receiver_feedback.h
@@ -15,6 +15,9 @@ namespace cast {
class MockRtcpReceiverFeedback : public RtcpReceiverFeedback {
public:
+ MockRtcpReceiverFeedback();
+ virtual ~MockRtcpReceiverFeedback();
+
MOCK_METHOD1(OnReceivedSenderReport,
void(const RtcpSenderInfo& remote_sender_info));
@@ -22,10 +25,18 @@ class MockRtcpReceiverFeedback : public RtcpReceiverFeedback {
void(const RtcpReceiverReferenceTimeReport& remote_time_report));
MOCK_METHOD0(OnReceivedSendReportRequest, void());
+
+ MOCK_METHOD1(OnReceivedReceiverLog,
+ void(const RtcpReceiverLogMessage& receiver_log));
+ MOCK_METHOD1(OnReceivedSenderLog,
+ void(const RtcpSenderLogMessage& sender_log));
};
class MockRtcpRttFeedback : public RtcpRttFeedback {
public:
+ MockRtcpRttFeedback();
+ virtual ~MockRtcpRttFeedback();
+
MOCK_METHOD3(OnReceivedDelaySinceLastReport,
void(uint32 media_ssrc,
uint32 last_report,
diff --git a/chromium/media/cast/rtcp/mock_rtcp_sender_feedback.cc b/chromium/media/cast/rtcp/mock_rtcp_sender_feedback.cc
new file mode 100644
index 00000000000..65c630148c2
--- /dev/null
+++ b/chromium/media/cast/rtcp/mock_rtcp_sender_feedback.cc
@@ -0,0 +1,17 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/rtcp/mock_rtcp_sender_feedback.h"
+
+namespace media {
+namespace cast {
+
+MockRtcpSenderFeedback::MockRtcpSenderFeedback() {
+}
+
+MockRtcpSenderFeedback::~MockRtcpSenderFeedback() {
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/rtcp/mock_rtcp_sender_feedback.h b/chromium/media/cast/rtcp/mock_rtcp_sender_feedback.h
index 3947625489f..40547e6283f 100644
--- a/chromium/media/cast/rtcp/mock_rtcp_sender_feedback.h
+++ b/chromium/media/cast/rtcp/mock_rtcp_sender_feedback.h
@@ -15,17 +15,8 @@ namespace cast {
class MockRtcpSenderFeedback : public RtcpSenderFeedback {
public:
- MOCK_METHOD1(OnReceivedReportBlock,
- void(const RtcpReportBlock& report_block));
-
- MOCK_METHOD0(OnReceivedIntraFrameRequest, void());
-
- MOCK_METHOD2(OnReceivedRpsi, void(uint8 payload_type, uint64 picture_id));
-
- MOCK_METHOD1(OnReceivedRemb, void(uint32 bitrate));
-
- MOCK_METHOD1(OnReceivedNackRequest,
- void(const std::list<uint16>& nack_sequence_numbers));
+ MockRtcpSenderFeedback();
+ virtual ~MockRtcpSenderFeedback();
MOCK_METHOD1(OnReceivedCastFeedback,
void(const RtcpCastMessage& cast_feedback));
diff --git a/chromium/media/cast/rtcp/rtcp.cc b/chromium/media/cast/rtcp/rtcp.cc
index c3e2c8e4d88..4ea4bc99ba9 100644
--- a/chromium/media/cast/rtcp/rtcp.cc
+++ b/chromium/media/cast/rtcp/rtcp.cc
@@ -4,10 +4,10 @@
#include "media/cast/rtcp/rtcp.h"
-#include "base/debug/trace_event.h"
#include "base/rand_util.h"
#include "media/cast/cast_config.h"
#include "media/cast/cast_defines.h"
+#include "media/cast/cast_environment.h"
#include "media/cast/rtcp/rtcp_defines.h"
#include "media/cast/rtcp/rtcp_receiver.h"
#include "media/cast/rtcp/rtcp_sender.h"
@@ -17,7 +17,7 @@
namespace media {
namespace cast {
-static const int kMaxRttMs = 1000000; // 1000 seconds.
+static const int kMaxRttMs = 10000; // 10 seconds.
// Time limit for received RTCP messages when we stop using it for lip-sync.
static const int64 kMaxDiffSinceReceivedRtcpMs = 100000; // 100 seconds.
@@ -52,11 +52,16 @@ RtcpNackMessage::~RtcpNackMessage() {}
RtcpRembMessage::RtcpRembMessage() {}
RtcpRembMessage::~RtcpRembMessage() {}
+RtcpReceiverFrameLogMessage::RtcpReceiverFrameLogMessage(uint32 timestamp)
+ : rtp_timestamp_(timestamp) {}
+
+RtcpReceiverFrameLogMessage::~RtcpReceiverFrameLogMessage() {}
class LocalRtcpReceiverFeedback : public RtcpReceiverFeedback {
public:
- explicit LocalRtcpReceiverFeedback(Rtcp* rtcp)
- : rtcp_(rtcp) {
+ LocalRtcpReceiverFeedback(Rtcp* rtcp,
+ scoped_refptr<CastEnvironment> cast_environment)
+ : rtcp_(rtcp), cast_environment_(cast_environment) {
}
virtual void OnReceivedSenderReport(
@@ -80,48 +85,128 @@ class LocalRtcpReceiverFeedback : public RtcpReceiverFeedback {
rtcp_->OnReceivedSendReportRequest();
}
+ virtual void OnReceivedReceiverLog(
+ const RtcpReceiverLogMessage& receiver_log) OVERRIDE {
+ // Add received log messages into our log system.
+ RtcpReceiverLogMessage::const_iterator it = receiver_log.begin();
+
+ for (; it != receiver_log.end(); ++it) {
+ uint32 rtp_timestamp = it->rtp_timestamp_;
+
+ RtcpReceiverEventLogMessages::const_iterator event_it =
+ it->event_log_messages_.begin();
+ for (; event_it != it->event_log_messages_.end(); ++event_it) {
+ // TODO(pwestin): we need to send in the event_it->event_timestamp to
+ // the log system too.
+ switch (event_it->type) {
+ case kPacketReceived:
+ cast_environment_->Logging()->InsertPacketEvent(kPacketReceived,
+ rtp_timestamp, kFrameIdUnknown, event_it->packet_id, 0, 0);
+ break;
+ case kAckSent:
+ case kAudioFrameDecoded:
+ case kVideoFrameDecoded:
+ cast_environment_->Logging()->InsertFrameEvent(event_it->type,
+ rtp_timestamp, kFrameIdUnknown);
+ break;
+ case kAudioPlayoutDelay:
+ case kVideoRenderDelay:
+ cast_environment_->Logging()->InsertFrameEventWithDelay(
+ event_it->type, rtp_timestamp, kFrameIdUnknown,
+ event_it->delay_delta);
+ break;
+ default:
+ VLOG(2) << "Received log message via RTCP that we did not expect: "
+ << static_cast<int>(event_it->type);
+ break;
+ }
+ }
+ }
+ }
+
+ virtual void OnReceivedSenderLog(
+ const RtcpSenderLogMessage& sender_log) OVERRIDE {
+ RtcpSenderLogMessage::const_iterator it = sender_log.begin();
+
+ for (; it != sender_log.end(); ++it) {
+ uint32 rtp_timestamp = it->rtp_timestamp;
+ CastLoggingEvent log_event = kUnknown;
+
+ // These events are provided to know the status of frames that never
+ // reached the receiver. The timing information for these events are not
+ // relevant and is not sent over the wire.
+ switch (it->frame_status) {
+ case kRtcpSenderFrameStatusDroppedByFlowControl:
+ // A frame that have been dropped by the flow control would have
+ // kVideoFrameCaptured as its last event in the log.
+ log_event = kVideoFrameCaptured;
+ break;
+ case kRtcpSenderFrameStatusDroppedByEncoder:
+ // A frame that have been dropped by the encoder would have
+ // kVideoFrameSentToEncoder as its last event in the log.
+ log_event = kVideoFrameSentToEncoder;
+ break;
+ case kRtcpSenderFrameStatusSentToNetwork:
+ // A frame that have be encoded is always sent to the network. We
+ // do not add a new log entry for this.
+ log_event = kVideoFrameEncoded;
+ break;
+ default:
+ continue;
+ }
+ // TODO(pwestin): how do we handle the truncated rtp_timestamp?
+ // Add received log messages into our log system.
+ cast_environment_->Logging()->InsertFrameEvent(log_event, rtp_timestamp,
+ kFrameIdUnknown);
+ }
+ }
+
private:
Rtcp* rtcp_;
+ scoped_refptr<CastEnvironment> cast_environment_;
};
-Rtcp::Rtcp(RtcpSenderFeedback* sender_feedback,
+Rtcp::Rtcp(scoped_refptr<CastEnvironment> cast_environment,
+ RtcpSenderFeedback* sender_feedback,
PacedPacketSender* paced_packet_sender,
RtpSenderStatistics* rtp_sender_statistics,
RtpReceiverStatistics* rtp_receiver_statistics,
RtcpMode rtcp_mode,
const base::TimeDelta& rtcp_interval,
- bool sending_media,
uint32 local_ssrc,
+ uint32 remote_ssrc,
const std::string& c_name)
: rtcp_interval_(rtcp_interval),
rtcp_mode_(rtcp_mode),
- sending_media_(sending_media),
local_ssrc_(local_ssrc),
+ remote_ssrc_(remote_ssrc),
rtp_sender_statistics_(rtp_sender_statistics),
rtp_receiver_statistics_(rtp_receiver_statistics),
- receiver_feedback_(new LocalRtcpReceiverFeedback(this)),
+ receiver_feedback_(new LocalRtcpReceiverFeedback(this, cast_environment)),
rtt_feedback_(new LocalRtcpRttFeedback(this)),
- rtcp_sender_(new RtcpSender(paced_packet_sender, local_ssrc, c_name)),
- last_report_sent_(0),
+ rtcp_sender_(new RtcpSender(cast_environment, paced_packet_sender,
+ local_ssrc, c_name)),
last_report_received_(0),
last_received_rtp_timestamp_(0),
last_received_ntp_seconds_(0),
last_received_ntp_fraction_(0),
min_rtt_(base::TimeDelta::FromMilliseconds(kMaxRttMs)),
number_of_rtt_in_avg_(0),
- clock_(&default_tick_clock_) {
- rtcp_receiver_.reset(new RtcpReceiver(sender_feedback,
+ cast_environment_(cast_environment) {
+ rtcp_receiver_.reset(new RtcpReceiver(cast_environment,
+ sender_feedback,
receiver_feedback_.get(),
rtt_feedback_.get(),
local_ssrc));
+ rtcp_receiver_->SetRemoteSSRC(remote_ssrc);
}
Rtcp::~Rtcp() {}
// static
-bool Rtcp::IsRtcpPacket(const uint8* packet, int length) {
- DCHECK_GE(length, 8) << "Invalid RTCP packet";
- if (length < 8) return false;
+bool Rtcp::IsRtcpPacket(const uint8* packet, size_t length) {
+ DCHECK_GE(length, kMinLengthOfRtcp) << "Invalid RTCP packet";
+ if (length < kMinLengthOfRtcp) return false;
uint8 packet_type = packet[1];
if (packet_type >= kPacketTypeLow && packet_type <= kPacketTypeHigh) {
@@ -131,7 +216,8 @@ bool Rtcp::IsRtcpPacket(const uint8* packet, int length) {
}
// static
-uint32 Rtcp::GetSsrcOfSender(const uint8* rtcp_buffer, int length) {
+uint32 Rtcp::GetSsrcOfSender(const uint8* rtcp_buffer, size_t length) {
+ DCHECK_GE(length, kMinLengthOfRtcp) << "Invalid RTCP packet";
uint32 ssrc_of_sender;
net::BigEndianReader big_endian_reader(rtcp_buffer, length);
big_endian_reader.Skip(4); // Skip header
@@ -146,11 +232,7 @@ base::TimeTicks Rtcp::TimeToSendNextRtcpReport() {
return next_time_to_send_rtcp_;
}
-void Rtcp::SetRemoteSSRC(uint32 ssrc) {
- rtcp_receiver_->SetRemoteSSRC(ssrc);
-}
-
-void Rtcp::IncomingRtcpPacket(const uint8* rtcp_buffer, int length) {
+void Rtcp::IncomingRtcpPacket(const uint8* rtcp_buffer, size_t length) {
RtcpParser rtcp_parser(rtcp_buffer, length);
if (!rtcp_parser.IsValid()) {
// Silently ignore packet.
@@ -160,99 +242,38 @@ void Rtcp::IncomingRtcpPacket(const uint8* rtcp_buffer, int length) {
rtcp_receiver_->IncomingRtcpPacket(&rtcp_parser);
}
-void Rtcp::SendRtcpCast(const RtcpCastMessage& cast_message) {
- uint32 packet_type_flags = 0;
- base::TimeTicks now = clock_->NowTicks();
-
- if (rtcp_mode_ == kRtcpCompound || now >= next_time_to_send_rtcp_) {
- if (sending_media_) {
- packet_type_flags = RtcpSender::kRtcpSr;
- } else {
- packet_type_flags = RtcpSender::kRtcpRr;
- }
- }
- packet_type_flags |= RtcpSender::kRtcpCast;
-
- SendRtcp(now, packet_type_flags, 0, &cast_message);
-}
-
-void Rtcp::SendRtcpPli(uint32 pli_remote_ssrc) {
+void Rtcp::SendRtcpFromRtpReceiver(const RtcpCastMessage* cast_message,
+ RtcpReceiverLogMessage* receiver_log) {
uint32 packet_type_flags = 0;
- base::TimeTicks now = clock_->NowTicks();
- if (rtcp_mode_ == kRtcpCompound || now >= next_time_to_send_rtcp_) {
- if (sending_media_) {
- packet_type_flags = RtcpSender::kRtcpSr;
- } else {
- packet_type_flags = RtcpSender::kRtcpRr;
- }
- }
- packet_type_flags |= RtcpSender::kRtcpPli;
- SendRtcp(now, packet_type_flags, pli_remote_ssrc, NULL);
-}
+ base::TimeTicks now = cast_environment_->Clock()->NowTicks();
+ RtcpReportBlock report_block;
+ RtcpReceiverReferenceTimeReport rrtr;
-void Rtcp::SendRtcpReport(uint32 media_ssrc) {
- uint32 packet_type_flags;
- base::TimeTicks now = clock_->NowTicks();
- if (sending_media_) {
- packet_type_flags = RtcpSender::kRtcpSr;
- } else {
- packet_type_flags = RtcpSender::kRtcpRr;
+ if (cast_message) {
+ packet_type_flags |= RtcpSender::kRtcpCast;
+ cast_environment_->Logging()->InsertGenericEvent(kAckSent,
+ cast_message->ack_frame_id_);
}
- SendRtcp(now, packet_type_flags, media_ssrc, NULL);
-}
-
-void Rtcp::SendRtcp(const base::TimeTicks& now,
- uint32 packet_type_flags,
- uint32 media_ssrc,
- const RtcpCastMessage* cast_message) {
- if (packet_type_flags & RtcpSender::kRtcpSr ||
- packet_type_flags & RtcpSender::kRtcpRr) {
- UpdateNextTimeToSendRtcp();
+ if (receiver_log) {
+ packet_type_flags |= RtcpSender::kRtcpReceiverLog;
}
- if (packet_type_flags & RtcpSender::kRtcpSr) {
- RtcpSenderInfo sender_info;
-
- if (rtp_sender_statistics_) {
- rtp_sender_statistics_->GetStatistics(now, &sender_info);
- } else {
- memset(&sender_info, 0, sizeof(sender_info));
- }
- time_last_report_sent_ = now;
- last_report_sent_ = (sender_info.ntp_seconds << 16) +
- (sender_info.ntp_fraction >> 16);
-
- RtcpDlrrReportBlock dlrr;
- if (!time_last_report_received_.is_null()) {
- packet_type_flags |= RtcpSender::kRtcpDlrr;
- dlrr.last_rr = last_report_received_;
- uint32 delay_seconds = 0;
- uint32 delay_fraction = 0;
- base::TimeDelta delta = now - time_last_report_received_;
- ConvertTimeToFractions(delta.InMicroseconds(),
- &delay_seconds,
- &delay_fraction);
+ if (rtcp_mode_ == kRtcpCompound || now >= next_time_to_send_rtcp_) {
+ packet_type_flags |= RtcpSender::kRtcpRr;
- dlrr.delay_since_last_rr =
- ConvertToNtpDiff(delay_seconds, delay_fraction);
- }
- rtcp_sender_->SendRtcp(packet_type_flags,
- &sender_info,
- NULL,
- media_ssrc,
- &dlrr,
- NULL,
- NULL);
- } else {
- RtcpReportBlock report_block;
report_block.remote_ssrc = 0; // Not needed to set send side.
- report_block.media_ssrc = media_ssrc; // SSRC of the RTP packet sender.
+ report_block.media_ssrc = remote_ssrc_; // SSRC of the RTP packet sender.
if (rtp_receiver_statistics_) {
rtp_receiver_statistics_->GetStatistics(
&report_block.fraction_lost,
&report_block.cumulative_lost,
&report_block.extended_high_sequence_number,
&report_block.jitter);
+ cast_environment_->Logging()->InsertGenericEvent(kJitterMs,
+ report_block.jitter);
+ cast_environment_->Logging()->InsertGenericEvent(kPacketLoss,
+ report_block.fraction_lost);
+
}
report_block.last_sr = last_report_received_;
@@ -270,26 +291,59 @@ void Rtcp::SendRtcp(const base::TimeTicks& now,
}
packet_type_flags |= RtcpSender::kRtcpRrtr;
- RtcpReceiverReferenceTimeReport rrtr;
- ConvertTimeToNtp(now, &rrtr.ntp_seconds, &rrtr.ntp_fraction);
-
- time_last_report_sent_ = now;
- last_report_sent_ = ConvertToNtpDiff(rrtr.ntp_seconds, rrtr.ntp_fraction);
-
- rtcp_sender_->SendRtcp(packet_type_flags,
- NULL,
- &report_block,
- media_ssrc,
- NULL,
- &rrtr,
- cast_message);
+ ConvertTimeTicksToNtp(now, &rrtr.ntp_seconds, &rrtr.ntp_fraction);
+ SaveLastSentNtpTime(now, rrtr.ntp_seconds, rrtr.ntp_fraction);
+ UpdateNextTimeToSendRtcp();
}
+ rtcp_sender_->SendRtcpFromRtpReceiver(packet_type_flags,
+ &report_block,
+ &rrtr,
+ cast_message,
+ receiver_log);
+}
+
+void Rtcp::SendRtcpFromRtpSender(
+ RtcpSenderLogMessage* sender_log_message) {
+ uint32 packet_type_flags = RtcpSender::kRtcpSr;
+ base::TimeTicks now = cast_environment_->Clock()->NowTicks();
+
+ if (sender_log_message) {
+ packet_type_flags |= RtcpSender::kRtcpSenderLog;
+ }
+
+ RtcpSenderInfo sender_info;
+ if (rtp_sender_statistics_) {
+ rtp_sender_statistics_->GetStatistics(now, &sender_info);
+ } else {
+ memset(&sender_info, 0, sizeof(sender_info));
+ }
+ SaveLastSentNtpTime(now, sender_info.ntp_seconds, sender_info.ntp_fraction);
+
+ RtcpDlrrReportBlock dlrr;
+ if (!time_last_report_received_.is_null()) {
+ packet_type_flags |= RtcpSender::kRtcpDlrr;
+ dlrr.last_rr = last_report_received_;
+ uint32 delay_seconds = 0;
+ uint32 delay_fraction = 0;
+ base::TimeDelta delta = now - time_last_report_received_;
+ ConvertTimeToFractions(delta.InMicroseconds(),
+ &delay_seconds,
+ &delay_fraction);
+
+ dlrr.delay_since_last_rr = ConvertToNtpDiff(delay_seconds, delay_fraction);
+ }
+
+ rtcp_sender_->SendRtcpFromRtpSender(packet_type_flags,
+ &sender_info,
+ &dlrr,
+ sender_log_message);
+ UpdateNextTimeToSendRtcp();
}
void Rtcp::OnReceivedNtp(uint32 ntp_seconds, uint32 ntp_fraction) {
last_report_received_ = (ntp_seconds << 16) + (ntp_fraction >> 16);
- base::TimeTicks now = clock_->NowTicks();
+ base::TimeTicks now = cast_environment_->Clock()->NowTicks();
time_last_report_received_ = now;
}
@@ -302,7 +356,7 @@ void Rtcp::OnReceivedLipSyncInfo(uint32 rtp_timestamp,
}
void Rtcp::OnReceivedSendReportRequest() {
- base::TimeTicks now = clock_->NowTicks();
+ base::TimeTicks now = cast_environment_->Clock()->NowTicks();
// Trigger a new RTCP report at next timer.
next_time_to_send_rtcp_ = now;
@@ -333,22 +387,52 @@ bool Rtcp::RtpTimestampInSenderTime(int frequency, uint32 rtp_timestamp,
// Sanity check.
if (abs(rtp_time_diff_ms) > kMaxDiffSinceReceivedRtcpMs) return false;
- *rtp_timestamp_in_ticks =
- ConvertNtpToTime(last_received_ntp_seconds_, last_received_ntp_fraction_) +
- base::TimeDelta::FromMilliseconds(rtp_time_diff_ms);
+ *rtp_timestamp_in_ticks = ConvertNtpToTimeTicks(last_received_ntp_seconds_,
+ last_received_ntp_fraction_) +
+ base::TimeDelta::FromMilliseconds(rtp_time_diff_ms);
return true;
}
void Rtcp::OnReceivedDelaySinceLastReport(uint32 receivers_ssrc,
uint32 last_report,
uint32 delay_since_last_report) {
- if (last_report_sent_ != last_report) return; // Feedback on another report.
- if (time_last_report_sent_.is_null()) return;
+ RtcpSendTimeMap::iterator it = last_reports_sent_map_.find(last_report);
+ if (it == last_reports_sent_map_.end()) {
+ return; // Feedback on another report.
+ }
- base::TimeDelta sender_delay = clock_->NowTicks() - time_last_report_sent_;
+ base::TimeDelta sender_delay = cast_environment_->Clock()->NowTicks()
+ - it->second;
UpdateRtt(sender_delay, ConvertFromNtpDiff(delay_since_last_report));
}
+void Rtcp::SaveLastSentNtpTime(const base::TimeTicks& now,
+ uint32 last_ntp_seconds,
+ uint32 last_ntp_fraction) {
+ // Make sure |now| is always greater than the last element in
+ // |last_reports_sent_queue_|.
+ if (!last_reports_sent_queue_.empty()) {
+ DCHECK(now >= last_reports_sent_queue_.back().second);
+ }
+
+ uint32 last_report = ConvertToNtpDiff(last_ntp_seconds, last_ntp_fraction);
+ last_reports_sent_map_[last_report] = now;
+ last_reports_sent_queue_.push(std::make_pair(last_report, now));
+
+ base::TimeTicks timeout = now - base::TimeDelta::FromMilliseconds(kMaxRttMs);
+
+ // Cleanup old statistics older than |timeout|.
+ while (!last_reports_sent_queue_.empty()) {
+ RtcpSendTimePair oldest_report = last_reports_sent_queue_.front();
+ if (oldest_report.second < timeout) {
+ last_reports_sent_map_.erase(oldest_report.first);
+ last_reports_sent_queue_.pop();
+ } else {
+ break;
+ }
+ }
+}
+
void Rtcp::UpdateRtt(const base::TimeDelta& sender_delay,
const base::TimeDelta& receiver_delay) {
base::TimeDelta rtt = sender_delay - receiver_delay;
@@ -365,7 +449,6 @@ void Rtcp::UpdateRtt(const base::TimeDelta& sender_delay,
avg_rtt_ms_ = rtt.InMilliseconds();
}
number_of_rtt_in_avg_++;
- TRACE_COUNTER_ID1("cast_rtcp", "RTT", local_ssrc_, rtt.InMilliseconds());
}
bool Rtcp::Rtt(base::TimeDelta* rtt,
@@ -377,7 +460,9 @@ bool Rtcp::Rtt(base::TimeDelta* rtt,
DCHECK(min_rtt) << "Invalid argument";
DCHECK(max_rtt) << "Invalid argument";
- if (number_of_rtt_in_avg_ == 0) return false;
+ if (number_of_rtt_in_avg_ == 0) return false;
+ cast_environment_->Logging()->InsertGenericEvent(kRttMs,
+ rtt->InMilliseconds());
*rtt = rtt_;
*avg_rtt = base::TimeDelta::FromMilliseconds(avg_rtt_ms_);
@@ -408,7 +493,7 @@ void Rtcp::UpdateNextTimeToSendRtcp() {
base::TimeDelta time_to_next = (rtcp_interval_ / 2) +
(rtcp_interval_ * random / 1000);
- base::TimeTicks now = clock_->NowTicks();
+ base::TimeTicks now = cast_environment_->Clock()->NowTicks();
next_time_to_send_rtcp_ = now + time_to_next;
}
diff --git a/chromium/media/cast/rtcp/rtcp.h b/chromium/media/cast/rtcp/rtcp.h
index 31962a526c6..aa083a5a4dd 100644
--- a/chromium/media/cast/rtcp/rtcp.h
+++ b/chromium/media/cast/rtcp/rtcp.h
@@ -7,16 +7,17 @@
#include <list>
#include <map>
+#include <queue>
#include <set>
#include <string>
#include "base/basictypes.h"
#include "base/memory/scoped_ptr.h"
-#include "base/time/default_tick_clock.h"
#include "base/time/tick_clock.h"
#include "base/time/time.h"
#include "media/cast/cast_config.h"
#include "media/cast/cast_defines.h"
+#include "media/cast/cast_environment.h"
#include "media/cast/rtcp/rtcp_defines.h"
namespace media {
@@ -28,19 +29,12 @@ class PacedPacketSender;
class RtcpReceiver;
class RtcpSender;
+typedef std::pair<uint32, base::TimeTicks> RtcpSendTimePair;
+typedef std::map<uint32, base::TimeTicks> RtcpSendTimeMap;
+typedef std::queue<RtcpSendTimePair> RtcpSendTimeQueue;
+
class RtcpSenderFeedback {
public:
- virtual void OnReceivedReportBlock(const RtcpReportBlock& report_block) = 0;
-
- virtual void OnReceivedIntraFrameRequest() = 0;
-
- virtual void OnReceivedRpsi(uint8 payload_type, uint64 picture_id) = 0;
-
- virtual void OnReceivedRemb(uint32 bitrate) = 0;
-
- virtual void OnReceivedNackRequest(
- const std::list<uint16>& nack_sequence_numbers) = 0;
-
virtual void OnReceivedCastFeedback(const RtcpCastMessage& cast_feedback) = 0;
virtual ~RtcpSenderFeedback() {}
@@ -66,39 +60,49 @@ class RtpReceiverStatistics {
class Rtcp {
public:
- Rtcp(RtcpSenderFeedback* sender_feedback,
+ Rtcp(scoped_refptr<CastEnvironment> cast_environment,
+ RtcpSenderFeedback* sender_feedback,
PacedPacketSender* paced_packet_sender,
RtpSenderStatistics* rtp_sender_statistics,
RtpReceiverStatistics* rtp_receiver_statistics,
RtcpMode rtcp_mode,
const base::TimeDelta& rtcp_interval,
- bool sending_media,
uint32 local_ssrc,
+ uint32 remote_ssrc,
const std::string& c_name);
virtual ~Rtcp();
- static bool IsRtcpPacket(const uint8* rtcp_buffer, int length);
+ static bool IsRtcpPacket(const uint8* rtcp_buffer, size_t length);
- static uint32 GetSsrcOfSender(const uint8* rtcp_buffer, int length);
+ static uint32 GetSsrcOfSender(const uint8* rtcp_buffer, size_t length);
base::TimeTicks TimeToSendNextRtcpReport();
- void SendRtcpReport(uint32 media_ssrc);
- void SendRtcpPli(uint32 media_ssrc);
- void SendRtcpCast(const RtcpCastMessage& cast_message);
- void SetRemoteSSRC(uint32 ssrc);
-
- void IncomingRtcpPacket(const uint8* rtcp_buffer, int length);
+ // |sender_log_message| is optional; without it no log messages will be
+ // attached to the RTCP report; instead a normal RTCP send report will be
+ // sent.
+ // Additionally if all messages in |sender_log_message| does
+ // not fit in the packet the |sender_log_message| will contain the remaining
+ // unsent messages.
+ void SendRtcpFromRtpSender(RtcpSenderLogMessage* sender_log_message);
+
+ // |cast_message| and |receiver_log| is optional; if |cast_message| is
+ // provided the RTCP receiver report will append a Cast message containing
+ // Acks and Nacks; if |receiver_log| is provided the RTCP receiver report will
+ // append the log messages. If no argument is set a normal RTCP receiver
+ // report will be sent. Additionally if all messages in |receiver_log| does
+ // not fit in the packet the |receiver_log| will contain the remaining unsent
+ // messages.
+ void SendRtcpFromRtpReceiver(const RtcpCastMessage* cast_message,
+ RtcpReceiverLogMessage* receiver_log);
+
+ void IncomingRtcpPacket(const uint8* rtcp_buffer, size_t length);
bool Rtt(base::TimeDelta* rtt, base::TimeDelta* avg_rtt,
base::TimeDelta* min_rtt, base::TimeDelta* max_rtt) const;
bool RtpTimestampInSenderTime(int frequency,
uint32 rtp_timestamp,
base::TimeTicks* rtp_timestamp_in_ticks) const;
- void set_clock(base::TickClock* clock) {
- clock_ = clock;
- }
-
protected:
int CheckForWrapAround(uint32 new_timestamp,
uint32 old_timestamp) const;
@@ -129,10 +133,14 @@ class Rtcp {
void UpdateNextTimeToSendRtcp();
+ void SaveLastSentNtpTime(const base::TimeTicks& now, uint32 last_ntp_seconds,
+ uint32 last_ntp_fraction);
+
+ scoped_refptr<CastEnvironment> cast_environment_;
const base::TimeDelta rtcp_interval_;
const RtcpMode rtcp_mode_;
- const bool sending_media_;
const uint32 local_ssrc_;
+ const uint32 remote_ssrc_;
// Not owned by this class.
RtpSenderStatistics* const rtp_sender_statistics_;
@@ -144,10 +152,8 @@ class Rtcp {
scoped_ptr<RtcpReceiver> rtcp_receiver_;
base::TimeTicks next_time_to_send_rtcp_;
-
- base::TimeTicks time_last_report_sent_;
- uint32 last_report_sent_;
-
+ RtcpSendTimeMap last_reports_sent_map_;
+ RtcpSendTimeQueue last_reports_sent_queue_;
base::TimeTicks time_last_report_received_;
uint32 last_report_received_;
@@ -161,9 +167,6 @@ class Rtcp {
int number_of_rtt_in_avg_;
float avg_rtt_ms_;
- base::DefaultTickClock default_tick_clock_;
- base::TickClock* clock_;
-
DISALLOW_COPY_AND_ASSIGN(Rtcp);
};
diff --git a/chromium/media/cast/rtcp/rtcp_defines.h b/chromium/media/cast/rtcp/rtcp_defines.h
index f0635f8ca8f..0277bd1feaf 100644
--- a/chromium/media/cast/rtcp/rtcp_defines.h
+++ b/chromium/media/cast/rtcp/rtcp_defines.h
@@ -11,20 +11,58 @@
#include "media/cast/cast_config.h"
#include "media/cast/cast_defines.h"
+#include "media/cast/logging/logging_defines.h"
namespace media {
namespace cast {
+// Handle the per frame ACK and NACK messages.
class RtcpCastMessage {
public:
explicit RtcpCastMessage(uint32 media_ssrc);
~RtcpCastMessage();
uint32 media_ssrc_;
- uint8 ack_frame_id_;
+ uint32 ack_frame_id_;
MissingFramesAndPacketsMap missing_frames_and_packets_;
};
+// Log messages form sender to receiver.
+enum RtcpSenderFrameStatus {
+ kRtcpSenderFrameStatusUnknown = 0,
+ kRtcpSenderFrameStatusDroppedByEncoder = 1,
+ kRtcpSenderFrameStatusDroppedByFlowControl = 2,
+ kRtcpSenderFrameStatusSentToNetwork = 3,
+};
+
+struct RtcpSenderFrameLogMessage {
+ RtcpSenderFrameStatus frame_status;
+ uint32 rtp_timestamp;
+};
+
+typedef std::list<RtcpSenderFrameLogMessage> RtcpSenderLogMessage;
+
+// Log messages from receiver to sender.
+struct RtcpReceiverEventLogMessage {
+ CastLoggingEvent type;
+ base::TimeTicks event_timestamp;
+ base::TimeDelta delay_delta;
+ uint16 packet_id;
+};
+
+typedef std::list<RtcpReceiverEventLogMessage> RtcpReceiverEventLogMessages;
+
+class RtcpReceiverFrameLogMessage {
+ public:
+ explicit RtcpReceiverFrameLogMessage(uint32 rtp_timestamp);
+ ~RtcpReceiverFrameLogMessage();
+
+ uint32 rtp_timestamp_;
+ RtcpReceiverEventLogMessages event_log_messages_;
+};
+
+typedef std::list<RtcpReceiverFrameLogMessage> RtcpReceiverLogMessage;
+
struct RtcpSenderInfo {
// First three members are used for lipsync.
// First two members are used for rtt.
@@ -32,7 +70,7 @@ struct RtcpSenderInfo {
uint32 ntp_fraction;
uint32 rtp_timestamp;
uint32 send_packet_count;
- uint32 send_octet_count;
+ size_t send_octet_count;
};
struct RtcpReportBlock {
diff --git a/chromium/media/cast/rtcp/rtcp_receiver.cc b/chromium/media/cast/rtcp/rtcp_receiver.cc
index c0e9b9b501b..152ebc00d7b 100644
--- a/chromium/media/cast/rtcp/rtcp_receiver.cc
+++ b/chromium/media/cast/rtcp/rtcp_receiver.cc
@@ -4,23 +4,70 @@
#include "media/cast/rtcp/rtcp_receiver.h"
-#include "base/debug/trace_event.h"
#include "base/logging.h"
#include "media/cast/rtcp/rtcp_utility.h"
+namespace {
+
+media::cast::CastLoggingEvent TranslateToLogEventFromWireFormat(uint8 event) {
+ switch (event) {
+ case 1:
+ return media::cast::kAckSent;
+ case 2:
+ return media::cast::kAudioPlayoutDelay;
+ case 3:
+ return media::cast::kAudioFrameDecoded;
+ case 4:
+ return media::cast::kVideoFrameDecoded;
+ case 5:
+ return media::cast::kVideoRenderDelay;
+ case 6:
+ return media::cast::kPacketReceived;
+ default:
+ // If the sender adds new log messages we will end up here until we add
+ // the new messages in the receiver.
+ VLOG(1) << "Unexpected log message received: " << static_cast<int>(event);
+ NOTREACHED();
+ return media::cast::kUnknown;
+ }
+}
+
+media::cast::RtcpSenderFrameStatus TranslateToFrameStatusFromWireFormat(
+ uint8 status) {
+ switch (status) {
+ case 0:
+ return media::cast::kRtcpSenderFrameStatusUnknown;
+ case 1:
+ return media::cast::kRtcpSenderFrameStatusDroppedByEncoder;
+ case 2:
+ return media::cast::kRtcpSenderFrameStatusDroppedByFlowControl;
+ case 3:
+ return media::cast::kRtcpSenderFrameStatusSentToNetwork;
+ default:
+ // If the sender adds new log messages we will end up here until we add
+ // the new messages in the receiver.
+ NOTREACHED();
+ VLOG(1) << "Unexpected status received: " << static_cast<int>(status);
+ return media::cast::kRtcpSenderFrameStatusUnknown;
+ }
+}
+
+} // namespace
+
namespace media {
namespace cast {
-RtcpReceiver::RtcpReceiver(RtcpSenderFeedback* sender_feedback,
+RtcpReceiver::RtcpReceiver(scoped_refptr<CastEnvironment> cast_environment,
+ RtcpSenderFeedback* sender_feedback,
RtcpReceiverFeedback* receiver_feedback,
RtcpRttFeedback* rtt_feedback,
uint32 local_ssrc)
- : ssrc_(local_ssrc),
+ : ssrc_(local_ssrc),
remote_ssrc_(0),
sender_feedback_(sender_feedback),
receiver_feedback_(receiver_feedback),
- rtt_feedback_(rtt_feedback) {
-}
+ rtt_feedback_(rtt_feedback),
+ cast_environment_(cast_environment) {}
RtcpReceiver::~RtcpReceiver() {}
@@ -67,15 +114,18 @@ void RtcpReceiver::IncomingRtcpPacket(RtcpParser* rtcp_parser) {
case kRtcpPayloadSpecificAppCode:
HandlePayloadSpecificApp(rtcp_parser);
break;
+ case kRtcpApplicationSpecificCastReceiverLogCode:
+ HandleApplicationSpecificCastReceiverLog(rtcp_parser);
+ break;
+ case kRtcpApplicationSpecificCastSenderLogCode:
+ HandleApplicationSpecificCastSenderLog(rtcp_parser);
+ break;
case kRtcpPayloadSpecificRembCode:
case kRtcpPayloadSpecificRembItemCode:
- // Ignore this until we want to support interop with webrtc.
- rtcp_parser->Iterate();
- break;
case kRtcpPayloadSpecificCastCode:
case kRtcpPayloadSpecificCastNackItemCode:
- rtcp_parser->Iterate();
- break;
+ case kRtcpApplicationSpecificCastReceiverLogFrameCode:
+ case kRtcpApplicationSpecificCastReceiverLogEventCode:
case kRtcpNotValidCode:
case kRtcpReportBlockItemCode:
case kRtcpSdesChunkCode:
@@ -85,7 +135,7 @@ void RtcpReceiver::IncomingRtcpPacket(RtcpParser* rtcp_parser) {
case kRtcpXrDlrrCode:
case kRtcpXrUnknownItemCode:
rtcp_parser->Iterate();
- DCHECK(false) << "Invalid state";
+ NOTREACHED() << "Invalid state";
break;
}
field_type = rtcp_parser->FieldType();
@@ -101,8 +151,7 @@ void RtcpReceiver::HandleSenderReport(RtcpParser* rtcp_parser) {
// Synchronization source identifier for the originator of this SR packet.
uint32 remote_ssrc = rtcp_field.sender_report.sender_ssrc;
- TRACE_EVENT_INSTANT1("cast_rtcp", "SR", TRACE_EVENT_SCOPE_THREAD,
- "remote_ssrc", remote_ssrc);
+ VLOG(1) << "Cast RTCP received SR from SSRC " << remote_ssrc;
if (remote_ssrc_ == remote_ssrc) {
RtcpSenderInfo remote_sender_info;
@@ -135,8 +184,7 @@ void RtcpReceiver::HandleReceiverReport(RtcpParser* rtcp_parser) {
uint32 remote_ssrc = rtcp_field.receiver_report.sender_ssrc;
- TRACE_EVENT_INSTANT1("cast_rtcp", "RR", TRACE_EVENT_SCOPE_THREAD,
- "remote_ssrc", remote_ssrc);
+ VLOG(1) << "Cast RTCP received RR from SSRC " << remote_ssrc;
rtcp_field_type = rtcp_parser->Iterate();
while (rtcp_field_type == kRtcpReportBlockItemCode) {
@@ -163,16 +211,11 @@ void RtcpReceiver::HandleReportBlock(const RtcpField* rtcp_field,
// This block is not for us ignore it.
return;
}
- TRACE_EVENT_INSTANT2("cast_rtcp", "RB", TRACE_EVENT_SCOPE_THREAD,
- "remote_ssrc", remote_ssrc,
- "ssrc", ssrc_);
-
- TRACE_COUNTER_ID1("cast_rtcp", "RtcpReceiver::FractionLost",
- rb.ssrc, rb.fraction_lost);
- TRACE_COUNTER_ID1("cast_rtcp", "RtcpReceiver::CumulativeNumberOfPacketsLost",
- rb.ssrc, rb.cumulative_number_of_packets_lost);
- TRACE_COUNTER_ID1("cast_rtcp", "RtcpReceiver::Jitter",
- rb.ssrc, rb.jitter);
+ VLOG(1) << "Cast RTCP received RB from SSRC " << remote_ssrc;
+ cast_environment_->Logging()->InsertGenericEvent(kPacketLoss,
+ rb.fraction_lost);
+ cast_environment_->Logging()->InsertGenericEvent(kJitterMs,
+ rb.jitter);
RtcpReportBlock report_block;
report_block.remote_ssrc = remote_ssrc;
@@ -185,9 +228,6 @@ void RtcpReceiver::HandleReportBlock(const RtcpField* rtcp_field,
report_block.last_sr = rb.last_sender_report;
report_block.delay_since_last_sr = rb.delay_last_sender_report;
- if (sender_feedback_) {
- sender_feedback_->OnReceivedReportBlock(report_block);
- }
if (rtt_feedback_) {
rtt_feedback_->OnReceivedDelaySinceLastReport(rb.ssrc,
rb.last_sender_report,
@@ -205,8 +245,7 @@ void RtcpReceiver::HandleSDES(RtcpParser* rtcp_parser) {
void RtcpReceiver::HandleSDESChunk(RtcpParser* rtcp_parser) {
const RtcpField& rtcp_field = rtcp_parser->Field();
- TRACE_EVENT_INSTANT1("cast_rtcp", "SDES", TRACE_EVENT_SCOPE_THREAD,
- "cname", TRACE_STR_COPY(rtcp_field.c_name.name));
+ VLOG(1) << "Cast RTCP received SDES with cname " << rtcp_field.c_name.name;
}
void RtcpReceiver::HandleXr(RtcpParser* rtcp_parser) {
@@ -263,8 +302,11 @@ void RtcpReceiver::HandleDlrr(RtcpParser* rtcp_parser) {
void RtcpReceiver::HandleNACK(RtcpParser* rtcp_parser) {
const RtcpField& rtcp_field = rtcp_parser->Field();
if (ssrc_ != rtcp_field.nack.media_ssrc) {
- // Not to us.
- rtcp_parser->Iterate();
+ RtcpFieldTypes field_type;
+ // Message not to us. Iterate until we have passed this message.
+ do {
+ field_type = rtcp_parser->Iterate();
+ } while (field_type == kRtcpGenericRtpFeedbackNackItemCode);
return;
}
std::list<uint16> nackSequenceNumbers;
@@ -274,9 +316,6 @@ void RtcpReceiver::HandleNACK(RtcpParser* rtcp_parser) {
HandleNACKItem(&rtcp_field, &nackSequenceNumbers);
field_type = rtcp_parser->Iterate();
}
- if (sender_feedback_) {
- sender_feedback_->OnReceivedNackRequest(nackSequenceNumbers);
- }
}
void RtcpReceiver::HandleNACKItem(const RtcpField* rtcp_field,
@@ -298,8 +337,7 @@ void RtcpReceiver::HandleBYE(RtcpParser* rtcp_parser) {
const RtcpField& rtcp_field = rtcp_parser->Field();
uint32 remote_ssrc = rtcp_field.bye.sender_ssrc;
if (remote_ssrc_ == remote_ssrc) {
- TRACE_EVENT_INSTANT1("cast_rtcp", "BYE", TRACE_EVENT_SCOPE_THREAD,
- "remote_ssrc", remote_ssrc);
+ VLOG(1) << "Cast RTCP received BYE from SSRC " << remote_ssrc;
}
rtcp_parser->Iterate();
}
@@ -308,9 +346,7 @@ void RtcpReceiver::HandlePLI(RtcpParser* rtcp_parser) {
const RtcpField& rtcp_field = rtcp_parser->Field();
if (ssrc_ == rtcp_field.pli.media_ssrc) {
// Received a signal that we need to send a new key frame.
- if (sender_feedback_) {
- sender_feedback_->OnReceivedIntraFrameRequest();
- }
+ VLOG(1) << "Cast RTCP received PLI on our SSRC " << ssrc_;
}
rtcp_parser->Iterate();
}
@@ -340,18 +376,22 @@ void RtcpReceiver::HandleRpsi(RtcpParser* rtcp_parser) {
rpsi_picture_id <<= 7; // Prepare next.
}
rpsi_picture_id += (rtcp_field.rpsi.native_bit_string[bytes - 1] & 0x7f);
- if (sender_feedback_) {
- sender_feedback_->OnReceivedRpsi(rtcp_field.rpsi.payload_type,
- rpsi_picture_id);
- }
+
+ VLOG(1) << "Cast RTCP received RPSI with picture_id " << rpsi_picture_id;
}
void RtcpReceiver::HandlePayloadSpecificApp(RtcpParser* rtcp_parser) {
const RtcpField& rtcp_field = rtcp_parser->Field();
uint32 remote_ssrc = rtcp_field.application_specific.sender_ssrc;
if (remote_ssrc_ != remote_ssrc) {
- // Message not to us.
- rtcp_parser->Iterate();
+ // Message not to us. Iterate until we have passed this message.
+ RtcpFieldTypes field_type;
+ do {
+ field_type = rtcp_parser->Iterate();
+ } while (field_type == kRtcpPayloadSpecificRembCode ||
+ field_type == kRtcpPayloadSpecificRembItemCode ||
+ field_type == kRtcpPayloadSpecificCastCode ||
+ field_type == kRtcpPayloadSpecificCastNackItemCode);
return;
}
@@ -381,19 +421,101 @@ void RtcpReceiver::HandlePayloadSpecificRembItem(RtcpParser* rtcp_parser) {
for (int i = 0; i < rtcp_field.remb_item.number_of_ssrcs; ++i) {
if (rtcp_field.remb_item.ssrcs[i] == ssrc_) {
// Found matching ssrc.
- if (sender_feedback_) {
- sender_feedback_->OnReceivedRemb(rtcp_field.remb_item.bitrate);
- }
+ VLOG(1) << "Cast RTCP received REMB with received_bitrate "
+ << rtcp_field.remb_item.bitrate;
return;
}
}
}
-void RtcpReceiver::HandlePayloadSpecificCastItem(RtcpParser* rtcp_parser) {
+void RtcpReceiver::HandleApplicationSpecificCastReceiverLog(
+ RtcpParser* rtcp_parser) {
const RtcpField& rtcp_field = rtcp_parser->Field();
+ uint32 remote_ssrc = rtcp_field.cast_receiver_log.sender_ssrc;
+ if (remote_ssrc_ != remote_ssrc) {
+ // Message not to us. Iterate until we have passed this message.
+ RtcpFieldTypes field_type;
+ do {
+ field_type = rtcp_parser->Iterate();
+ } while (field_type == kRtcpApplicationSpecificCastReceiverLogFrameCode ||
+ field_type == kRtcpApplicationSpecificCastReceiverLogEventCode);
+ return;
+ }
+ RtcpReceiverLogMessage receiver_log;
+ RtcpFieldTypes field_type = rtcp_parser->Iterate();
+ while (field_type == kRtcpApplicationSpecificCastReceiverLogFrameCode) {
+ RtcpReceiverFrameLogMessage frame_log(
+ rtcp_field.cast_receiver_log.rtp_timestamp);
+
+ field_type = rtcp_parser->Iterate();
+ while (field_type == kRtcpApplicationSpecificCastReceiverLogEventCode) {
+ HandleApplicationSpecificCastReceiverEventLog(rtcp_parser,
+ &frame_log.event_log_messages_);
+ field_type = rtcp_parser->Iterate();
+ }
+ receiver_log.push_back(frame_log);
+ }
+
+ if (receiver_feedback_ && !receiver_log.empty()) {
+ receiver_feedback_->OnReceivedReceiverLog(receiver_log);
+ }
+}
+
+void RtcpReceiver::HandleApplicationSpecificCastReceiverEventLog(
+ RtcpParser* rtcp_parser,
+ RtcpReceiverEventLogMessages* event_log_messages) {
+ const RtcpField& rtcp_field = rtcp_parser->Field();
+
+ RtcpReceiverEventLogMessage event_log;
+ event_log.type = TranslateToLogEventFromWireFormat(
+ rtcp_field.cast_receiver_log.event);
+ event_log.event_timestamp = base::TimeTicks() +
+ base::TimeDelta::FromMilliseconds(
+ rtcp_field.cast_receiver_log.event_timestamp_base +
+ rtcp_field.cast_receiver_log.event_timestamp_delta);
+ event_log.delay_delta = base::TimeDelta::FromMilliseconds(
+ rtcp_field.cast_receiver_log.delay_delta_or_packet_id);
+ event_log.packet_id =
+ rtcp_field.cast_receiver_log.delay_delta_or_packet_id;
+ event_log_messages->push_back(event_log);
+}
+
+void RtcpReceiver::HandleApplicationSpecificCastSenderLog(
+ RtcpParser* rtcp_parser) {
+ const RtcpField& rtcp_field = rtcp_parser->Field();
+ uint32 remote_ssrc = rtcp_field.cast_sender_log.sender_ssrc;
+
+ if (remote_ssrc_ != remote_ssrc) {
+ RtcpFieldTypes field_type;
+ // Message not to us. Iterate until we have passed this message.
+ do {
+ field_type = rtcp_parser->Iterate();
+ } while (field_type == kRtcpApplicationSpecificCastSenderLogCode);
+ return;
+ }
+ RtcpSenderLogMessage sender_log;
+
+ RtcpFieldTypes field_type = rtcp_parser->Iterate();
+ while (field_type == kRtcpApplicationSpecificCastSenderLogCode) {
+ const RtcpField& rtcp_field = rtcp_parser->Field();
+ RtcpSenderFrameLogMessage frame_log;
+ frame_log.frame_status =
+ TranslateToFrameStatusFromWireFormat(rtcp_field.cast_sender_log.status);
+ frame_log.rtp_timestamp = rtcp_field.cast_sender_log.rtp_timestamp;
+ sender_log.push_back(frame_log);
+ field_type = rtcp_parser->Iterate();
+ }
+ if (receiver_feedback_) {
+ receiver_feedback_->OnReceivedSenderLog(sender_log);
+ }
+}
+
+void RtcpReceiver::HandlePayloadSpecificCastItem(RtcpParser* rtcp_parser) {
+ const RtcpField& rtcp_field = rtcp_parser->Field();
RtcpCastMessage cast_message(remote_ssrc_);
- cast_message.ack_frame_id_ = rtcp_field.cast_item.last_frame_id;
+ cast_message.ack_frame_id_ = ack_frame_id_wrap_helper_.MapTo32bitsFrameId(
+ rtcp_field.cast_item.last_frame_id);
RtcpFieldTypes packet_type = rtcp_parser->Iterate();
while (packet_type == kRtcpPayloadSpecificCastNackItemCode) {
@@ -409,17 +531,16 @@ void RtcpReceiver::HandlePayloadSpecificCastItem(RtcpParser* rtcp_parser) {
void RtcpReceiver::HandlePayloadSpecificCastNackItem(
const RtcpField* rtcp_field,
- std::map<uint8, std::set<uint16> >* missing_frames_and_packets) {
+ MissingFramesAndPacketsMap* missing_frames_and_packets) {
- std::map<uint8, std::set<uint16> >::iterator frame_it =
+ MissingFramesAndPacketsMap::iterator frame_it =
missing_frames_and_packets->find(rtcp_field->cast_nack_item.frame_id);
if (frame_it == missing_frames_and_packets->end()) {
// First missing packet in a frame.
- std::set<uint16> empty_set;
- std::pair<std::map<uint8, std::set<uint16> >::iterator, bool> ret;
- ret = missing_frames_and_packets->insert(
- std::pair<uint8, std::set<uint16> >(
+ PacketIdSet empty_set;
+ std::pair<MissingFramesAndPacketsMap::iterator, bool> ret =
+ missing_frames_and_packets->insert(std::pair<uint8, PacketIdSet>(
rtcp_field->cast_nack_item.frame_id, empty_set));
frame_it = ret.first;
DCHECK(frame_it != missing_frames_and_packets->end()) << "Invalid state";
@@ -455,10 +576,9 @@ void RtcpReceiver::HandleFIR(RtcpParser* rtcp_parser) {
void RtcpReceiver::HandleFIRItem(const RtcpField* rtcp_field) {
// Is it our sender that is requested to generate a new keyframe.
- if (ssrc_ != rtcp_field->fir_item.ssrc) return;
- if (sender_feedback_) {
- sender_feedback_->OnReceivedIntraFrameRequest();
- }
+ if (ssrc_ != rtcp_field->fir_item.ssrc) return;
+
+ VLOG(1) << "Cast RTCP received FIR on our SSRC " << ssrc_;
}
} // namespace cast
diff --git a/chromium/media/cast/rtcp/rtcp_receiver.h b/chromium/media/cast/rtcp/rtcp_receiver.h
index 8c315d07b9d..81383c4ec10 100644
--- a/chromium/media/cast/rtcp/rtcp_receiver.h
+++ b/chromium/media/cast/rtcp/rtcp_receiver.h
@@ -5,6 +5,7 @@
#ifndef MEDIA_CAST_RTCP_RTCP_RECEIVER_H_
#define MEDIA_CAST_RTCP_RTCP_RECEIVER_H_
+#include "media/cast/net/cast_net_defines.h"
#include "media/cast/rtcp/rtcp.h"
#include "media/cast/rtcp/rtcp_defines.h"
#include "media/cast/rtcp/rtcp_utility.h"
@@ -22,6 +23,12 @@ class RtcpReceiverFeedback {
virtual void OnReceivedSendReportRequest() = 0;
+ virtual void OnReceivedReceiverLog(
+ const RtcpReceiverLogMessage& receiver_log) = 0;
+
+ virtual void OnReceivedSenderLog(
+ const RtcpSenderLogMessage& sender_log) = 0;
+
virtual ~RtcpReceiverFeedback() {}
};
@@ -37,7 +44,8 @@ class RtcpRttFeedback {
class RtcpReceiver {
public:
- explicit RtcpReceiver(RtcpSenderFeedback* sender_feedback,
+ explicit RtcpReceiver(scoped_refptr<CastEnvironment> cast_environment,
+ RtcpSenderFeedback* sender_feedback,
RtcpReceiverFeedback* receiver_feedback,
RtcpRttFeedback* rtt_feedback,
uint32 local_ssrc);
@@ -87,7 +95,13 @@ class RtcpReceiver {
void HandlePayloadSpecificCastItem(RtcpParser* rtcp_parser);
void HandlePayloadSpecificCastNackItem(
const RtcpField* rtcp_field,
- std::map<uint8, std::set<uint16> >* missing_frames_and_packets);
+ MissingFramesAndPacketsMap* missing_frames_and_packets);
+
+ void HandleApplicationSpecificCastReceiverLog(RtcpParser* rtcp_parser);
+ void HandleApplicationSpecificCastSenderLog(RtcpParser* rtcp_parser);
+ void HandleApplicationSpecificCastReceiverEventLog(
+ RtcpParser* rtcp_parser,
+ RtcpReceiverEventLogMessages* event_log_messages);
const uint32 ssrc_;
uint32 remote_ssrc_;
@@ -96,6 +110,9 @@ class RtcpReceiver {
RtcpSenderFeedback* const sender_feedback_;
RtcpReceiverFeedback* const receiver_feedback_;
RtcpRttFeedback* const rtt_feedback_;
+ scoped_refptr<CastEnvironment> cast_environment_;
+
+ FrameIdWrapHelper ack_frame_id_wrap_helper_;
DISALLOW_COPY_AND_ASSIGN(RtcpReceiver);
};
diff --git a/chromium/media/cast/rtcp/rtcp_receiver_unittest.cc b/chromium/media/cast/rtcp/rtcp_receiver_unittest.cc
index 5073944fa28..b5c5d2d3889 100644
--- a/chromium/media/cast/rtcp/rtcp_receiver_unittest.cc
+++ b/chromium/media/cast/rtcp/rtcp_receiver_unittest.cc
@@ -3,11 +3,14 @@
// found in the LICENSE file.
#include "base/memory/scoped_ptr.h"
+#include "base/test/simple_test_tick_clock.h"
+#include "media/cast/cast_environment.h"
#include "media/cast/rtcp/mock_rtcp_receiver_feedback.h"
#include "media/cast/rtcp/mock_rtcp_sender_feedback.h"
#include "media/cast/rtcp/rtcp_receiver.h"
#include "media/cast/rtcp/rtcp_utility.h"
#include "media/cast/rtcp/test_rtcp_packet_builder.h"
+#include "media/cast/test/fake_task_runner.h"
#include "testing/gmock/include/gmock/gmock.h"
namespace media {
@@ -20,24 +23,17 @@ static const uint32 kSourceSsrc = 0x40506;
static const uint32 kUnknownSsrc = 0xDEAD;
static const std::string kCName("test@10.1.1.1");
+namespace {
class SenderFeedbackCastVerification : public RtcpSenderFeedback {
public:
SenderFeedbackCastVerification() : called_(false) {}
- virtual void OnReceivedReportBlock(
- const RtcpReportBlock& report_block) OVERRIDE {};
- virtual void OnReceivedIntraFrameRequest() OVERRIDE {};
- virtual void OnReceivedRpsi(uint8 payload_type,
- uint64 picture_id) OVERRIDE {};
- virtual void OnReceivedRemb(uint32 bitrate) OVERRIDE {};
- virtual void OnReceivedNackRequest(
- const std::list<uint16>& nack_sequence_numbers) OVERRIDE {};
virtual void OnReceivedCastFeedback(
const RtcpCastMessage& cast_feedback) OVERRIDE {
EXPECT_EQ(cast_feedback.media_ssrc_, kSenderSsrc);
EXPECT_EQ(cast_feedback.ack_frame_id_, kAckFrameId);
- std::map<uint8, std::set<uint16> >::const_iterator frame_it =
+ MissingFramesAndPacketsMap::const_iterator frame_it =
cast_feedback.missing_frames_and_packets_.begin();
EXPECT_TRUE(frame_it != cast_feedback.missing_frames_and_packets_.end());
@@ -47,7 +43,7 @@ class SenderFeedbackCastVerification : public RtcpSenderFeedback {
EXPECT_TRUE(frame_it != cast_feedback.missing_frames_and_packets_.end());
EXPECT_EQ(kFrameIdWithLostPackets, frame_it->first);
EXPECT_EQ(3UL, frame_it->second.size());
- std::set<uint16>::const_iterator packet_it = frame_it->second.begin();
+ PacketIdSet::const_iterator packet_it = frame_it->second.begin();
EXPECT_EQ(kLostPacketId1, *packet_it);
++packet_it;
EXPECT_EQ(kLostPacketId2, *packet_it);
@@ -58,38 +54,123 @@ class SenderFeedbackCastVerification : public RtcpSenderFeedback {
called_ = true;
}
- bool called() { return called_; }
+ bool called() const { return called_; }
private:
bool called_;
};
+class RtcpReceiverCastLogVerification : public RtcpReceiverFeedback {
+ public:
+ RtcpReceiverCastLogVerification()
+ : called_on_received_sender_log_(false),
+ called_on_received_receiver_log_(false) {}
+
+ virtual void OnReceivedSenderReport(
+ const RtcpSenderInfo& remote_sender_info) OVERRIDE {};
+
+ virtual void OnReceiverReferenceTimeReport(
+ const RtcpReceiverReferenceTimeReport& remote_time_report) OVERRIDE {};
+
+ virtual void OnReceivedSendReportRequest() OVERRIDE {};
+
+ virtual void OnReceivedReceiverLog(
+ const RtcpReceiverLogMessage& receiver_log) OVERRIDE {
+ EXPECT_EQ(expected_receiver_log_.size(), receiver_log.size());
+ RtcpReceiverLogMessage::const_iterator expected_it =
+ expected_receiver_log_.begin();
+ RtcpReceiverLogMessage::const_iterator incoming_it = receiver_log.begin();
+ for (; incoming_it != receiver_log.end(); ++incoming_it) {
+ EXPECT_EQ(expected_it->rtp_timestamp_, incoming_it->rtp_timestamp_);
+ EXPECT_EQ(expected_it->event_log_messages_.size(),
+ incoming_it->event_log_messages_.size());
+
+ RtcpReceiverEventLogMessages::const_iterator event_incoming_it =
+ incoming_it->event_log_messages_.begin();
+ RtcpReceiverEventLogMessages::const_iterator event_expected_it =
+ expected_it->event_log_messages_.begin();
+ for (; event_incoming_it != incoming_it->event_log_messages_.end();
+ ++event_incoming_it, ++event_expected_it) {
+ EXPECT_EQ(event_expected_it->type, event_incoming_it->type);
+ EXPECT_EQ(event_expected_it->event_timestamp,
+ event_incoming_it->event_timestamp);
+ if (event_expected_it->type == kPacketReceived) {
+ EXPECT_EQ(event_expected_it->packet_id, event_incoming_it->packet_id);
+ } else {
+ EXPECT_EQ(event_expected_it->delay_delta,
+ event_incoming_it->delay_delta);
+ }
+ }
+ expected_receiver_log_.pop_front();
+ expected_it = expected_receiver_log_.begin();
+ }
+ called_on_received_receiver_log_ = true;
+ }
+
+ virtual void OnReceivedSenderLog(
+ const RtcpSenderLogMessage& sender_log) OVERRIDE {
+ EXPECT_EQ(expected_sender_log_.size(), sender_log.size());
+
+ RtcpSenderLogMessage::const_iterator expected_it =
+ expected_sender_log_.begin();
+ RtcpSenderLogMessage::const_iterator incoming_it = sender_log.begin();
+ for (; expected_it != expected_sender_log_.end();
+ ++expected_it, ++incoming_it) {
+ EXPECT_EQ(expected_it->frame_status, incoming_it->frame_status);
+ EXPECT_EQ(0xffffff & expected_it->rtp_timestamp,
+ incoming_it->rtp_timestamp);
+ }
+ called_on_received_sender_log_ = true;
+ }
+
+ bool OnReceivedSenderLogCalled() {
+ return called_on_received_sender_log_;
+ }
+
+ bool OnReceivedReceiverLogCalled() {
+ return called_on_received_receiver_log_ && expected_receiver_log_.empty();
+ }
+
+ void SetExpectedReceiverLog(const RtcpReceiverLogMessage& receiver_log) {
+ expected_receiver_log_ = receiver_log;
+ }
+
+ void SetExpectedSenderLog(const RtcpSenderLogMessage& sender_log) {
+ expected_sender_log_ = sender_log;
+ }
+
+ private:
+ RtcpReceiverLogMessage expected_receiver_log_;
+ RtcpSenderLogMessage expected_sender_log_;
+ bool called_on_received_sender_log_;
+ bool called_on_received_receiver_log_;
+};
+
+} // namespace
class RtcpReceiverTest : public ::testing::Test {
protected:
RtcpReceiverTest()
- : rtcp_receiver_(new RtcpReceiver(&mock_sender_feedback_,
+ : task_runner_(new test::FakeTaskRunner(&testing_clock_)),
+ cast_environment_(new CastEnvironment(&testing_clock_, task_runner_,
+ task_runner_, task_runner_, task_runner_, task_runner_,
+ GetDefaultCastLoggingConfig())),
+ rtcp_receiver_(new RtcpReceiver(cast_environment_,
+ &mock_sender_feedback_,
&mock_receiver_feedback_,
&mock_rtt_feedback_,
kSourceSsrc)) {
}
- ~RtcpReceiverTest() {}
+ virtual ~RtcpReceiverTest() {}
- void SetUp() OVERRIDE {
+ virtual void SetUp() OVERRIDE {
EXPECT_CALL(mock_receiver_feedback_, OnReceivedSenderReport(_)).Times(0);
EXPECT_CALL(mock_receiver_feedback_,
OnReceiverReferenceTimeReport(_)).Times(0);
EXPECT_CALL(mock_receiver_feedback_,
OnReceivedSendReportRequest()).Times(0);
-
- EXPECT_CALL(mock_sender_feedback_, OnReceivedReportBlock(_)).Times(0);
- EXPECT_CALL(mock_sender_feedback_, OnReceivedIntraFrameRequest()).Times(0);
- EXPECT_CALL(mock_sender_feedback_, OnReceivedRpsi(_, _)).Times(0);
- EXPECT_CALL(mock_sender_feedback_, OnReceivedRemb(_)).Times(0);
- EXPECT_CALL(mock_sender_feedback_, OnReceivedNackRequest(_)).Times(0);
EXPECT_CALL(mock_sender_feedback_, OnReceivedCastFeedback(_)).Times(0);
-
EXPECT_CALL(mock_rtt_feedback_,
OnReceivedDelaySinceLastReport(_, _, _)).Times(0);
@@ -104,7 +185,7 @@ class RtcpReceiverTest : public ::testing::Test {
expected_report_block_.fraction_lost = kLoss >> 24;
expected_report_block_.cumulative_lost = kLoss & 0xffffff;
expected_report_block_.extended_high_sequence_number = kExtendedMax;
- expected_report_block_.jitter = kJitter;
+ expected_report_block_.jitter = kTestJitter;
expected_report_block_.last_sr = kLastSr;
expected_report_block_.delay_since_last_sr = kDelayLastSr;
expected_receiver_reference_report_.remote_ssrc = kSenderSsrc;
@@ -118,6 +199,9 @@ class RtcpReceiverTest : public ::testing::Test {
rtcp_receiver_->IncomingRtcpPacket(&rtcp_parser);
}
+ base::SimpleTestTickClock testing_clock_;
+ scoped_refptr<test::FakeTaskRunner> task_runner_;
+ scoped_refptr<CastEnvironment> cast_environment_;
MockRtcpReceiverFeedback mock_receiver_feedback_;
MockRtcpRttFeedback mock_rtt_feedback_;
MockRtcpSenderFeedback mock_sender_feedback_;
@@ -157,9 +241,6 @@ TEST_F(RtcpReceiverTest, InjectReceiveReportPacket) {
// local ssrc.
InjectRtcpPacket(p1.Packet(), p1.Length());
- EXPECT_CALL(mock_sender_feedback_,
- OnReceivedReportBlock(expected_report_block_)).Times(1);
-
EXPECT_CALL(mock_rtt_feedback_,
OnReceivedDelaySinceLastReport(kSourceSsrc,
kLastSr,
@@ -195,8 +276,6 @@ TEST_F(RtcpReceiverTest, InjectSenderReportWithReportBlockPacket) {
InjectRtcpPacket(p1.Packet(), p1.Length());
EXPECT_CALL(mock_receiver_feedback_, OnReceivedSenderReport(_)).Times(0);
- EXPECT_CALL(mock_sender_feedback_,
- OnReceivedReportBlock(expected_report_block_)).Times(1);
EXPECT_CALL(mock_rtt_feedback_,
OnReceivedDelaySinceLastReport(kSourceSsrc,
kLastSr,
@@ -216,8 +295,6 @@ TEST_F(RtcpReceiverTest, InjectSenderReportWithReportBlockPacket) {
EXPECT_CALL(mock_receiver_feedback_,
OnReceivedSenderReport(expected_sender_info_)).Times(1);
- EXPECT_CALL(mock_sender_feedback_,
- OnReceivedReportBlock(expected_report_block_)).Times(1);
EXPECT_CALL(mock_rtt_feedback_,
OnReceivedDelaySinceLastReport(kSourceSsrc,
kLastSr,
@@ -270,8 +347,6 @@ TEST_F(RtcpReceiverTest, InjectReceiverReportPacketWithRrtr) {
// local ssrc.
InjectRtcpPacket(p1.Packet(), p1.Length());
- EXPECT_CALL(mock_sender_feedback_,
- OnReceivedReportBlock(expected_report_block_)).Times(1);
EXPECT_CALL(mock_rtt_feedback_,
OnReceivedDelaySinceLastReport(kSourceSsrc,
kLastSr,
@@ -302,13 +377,10 @@ TEST_F(RtcpReceiverTest, InjectReceiverReportPacketWithIntraFrameRequest) {
// local ssrc.
InjectRtcpPacket(p1.Packet(), p1.Length());
- EXPECT_CALL(mock_sender_feedback_,
- OnReceivedReportBlock(expected_report_block_)).Times(1);
EXPECT_CALL(mock_rtt_feedback_,
OnReceivedDelaySinceLastReport(kSourceSsrc,
kLastSr,
kDelayLastSr)).Times(1);
- EXPECT_CALL(mock_sender_feedback_, OnReceivedIntraFrameRequest()).Times(1);
TestRtcpPacketBuilder p2;
p2.AddRr(kSenderSsrc, 1);
@@ -329,8 +401,6 @@ TEST_F(RtcpReceiverTest, InjectReceiverReportPacketWithCastFeedback) {
// local ssrc.
InjectRtcpPacket(p1.Packet(), p1.Length());
- EXPECT_CALL(mock_sender_feedback_,
- OnReceivedReportBlock(expected_report_block_)).Times(1);
EXPECT_CALL(mock_rtt_feedback_,
OnReceivedDelaySinceLastReport(kSourceSsrc,
kLastSr,
@@ -351,7 +421,8 @@ TEST_F(RtcpReceiverTest, InjectReceiverReportPacketWithCastFeedback) {
TEST_F(RtcpReceiverTest, InjectReceiverReportPacketWithCastVerification) {
SenderFeedbackCastVerification sender_feedback_cast_verification;
- RtcpReceiver rtcp_receiver(&sender_feedback_cast_verification,
+ RtcpReceiver rtcp_receiver(cast_environment_,
+ &sender_feedback_cast_verification,
&mock_receiver_feedback_,
&mock_rtt_feedback_,
kSourceSsrc);
@@ -376,5 +447,140 @@ TEST_F(RtcpReceiverTest, InjectReceiverReportPacketWithCastVerification) {
EXPECT_TRUE(sender_feedback_cast_verification.called());
}
+TEST_F(RtcpReceiverTest, InjectSenderReportWithCastSenderLogVerification) {
+ RtcpReceiverCastLogVerification cast_log_verification;
+ RtcpReceiver rtcp_receiver(cast_environment_,
+ &mock_sender_feedback_,
+ &cast_log_verification,
+ &mock_rtt_feedback_,
+ kSourceSsrc);
+ rtcp_receiver.SetRemoteSSRC(kSenderSsrc);
+
+ RtcpSenderLogMessage sender_log;
+ for (int j = 0; j < 359; ++j) {
+ RtcpSenderFrameLogMessage sender_frame_log;
+ sender_frame_log.frame_status = kRtcpSenderFrameStatusSentToNetwork;
+ sender_frame_log.rtp_timestamp = kRtpTimestamp + j * 90;
+ sender_log.push_back(sender_frame_log);
+ }
+ cast_log_verification.SetExpectedSenderLog(sender_log);
+
+ TestRtcpPacketBuilder p;
+ p.AddSr(kSenderSsrc, 0);
+ p.AddSdesCname(kSenderSsrc, kCName);
+ p.AddSenderLog(kSenderSsrc);
+
+ for (int i = 0; i < 359; ++i) {
+ p.AddSenderFrameLog(kRtcpSenderFrameStatusSentToNetwork,
+ kRtpTimestamp + i * 90);
+ }
+ RtcpParser rtcp_parser(p.Packet(), p.Length());
+ rtcp_receiver.IncomingRtcpPacket(&rtcp_parser);
+
+ EXPECT_TRUE(cast_log_verification.OnReceivedSenderLogCalled());
+}
+
+TEST_F(RtcpReceiverTest, InjectReceiverReportWithReceiverLogVerificationBase) {
+ static const uint32 kTimeBaseMs = 12345678;
+ static const uint32 kTimeDelayMs = 10;
+ static const uint32 kDelayDeltaMs = 123;
+ base::SimpleTestTickClock testing_clock;
+ testing_clock.Advance(base::TimeDelta::FromMilliseconds(kTimeBaseMs));
+
+ RtcpReceiverCastLogVerification cast_log_verification;
+ RtcpReceiver rtcp_receiver(cast_environment_,
+ &mock_sender_feedback_,
+ &cast_log_verification,
+ &mock_rtt_feedback_,
+ kSourceSsrc);
+ rtcp_receiver.SetRemoteSSRC(kSenderSsrc);
+
+ RtcpReceiverLogMessage receiver_log;
+ RtcpReceiverFrameLogMessage frame_log(kRtpTimestamp);
+ RtcpReceiverEventLogMessage event_log;
+
+ event_log.type = kAckSent;
+ event_log.event_timestamp = testing_clock.NowTicks();
+ event_log.delay_delta = base::TimeDelta::FromMilliseconds(kDelayDeltaMs);
+ frame_log.event_log_messages_.push_back(event_log);
+
+ testing_clock.Advance(base::TimeDelta::FromMilliseconds(kTimeDelayMs));
+ event_log.type = kPacketReceived;
+ event_log.event_timestamp = testing_clock.NowTicks();
+ event_log.packet_id = kLostPacketId1;
+ frame_log.event_log_messages_.push_back(event_log);
+ receiver_log.push_back(frame_log);
+
+ cast_log_verification.SetExpectedReceiverLog(receiver_log);
+
+ TestRtcpPacketBuilder p;
+ p.AddRr(kSenderSsrc, 1);
+ p.AddRb(kSourceSsrc);
+ p.AddReceiverLog(kSenderSsrc);
+ p.AddReceiverFrameLog(kRtpTimestamp, 2, kTimeBaseMs);
+ p.AddReceiverEventLog(kDelayDeltaMs, 1, 0);
+ p.AddReceiverEventLog(kLostPacketId1, 6, kTimeDelayMs);
+
+ EXPECT_CALL(mock_rtt_feedback_,
+ OnReceivedDelaySinceLastReport(kSourceSsrc, kLastSr, kDelayLastSr)).
+ Times(1);
+
+ RtcpParser rtcp_parser(p.Packet(), p.Length());
+ rtcp_receiver.IncomingRtcpPacket(&rtcp_parser);
+
+ EXPECT_TRUE(cast_log_verification.OnReceivedReceiverLogCalled());
+}
+
+TEST_F(RtcpReceiverTest, InjectReceiverReportWithReceiverLogVerificationMulti) {
+ static const uint32 kTimeBaseMs = 12345678;
+ static const uint32 kTimeDelayMs = 10;
+ static const uint32 kDelayDeltaMs = 123;
+ base::SimpleTestTickClock testing_clock;
+ testing_clock.Advance(base::TimeDelta::FromMilliseconds(kTimeBaseMs));
+
+ RtcpReceiverCastLogVerification cast_log_verification;
+ RtcpReceiver rtcp_receiver(cast_environment_,
+ &mock_sender_feedback_,
+ &cast_log_verification,
+ &mock_rtt_feedback_,
+ kSourceSsrc);
+ rtcp_receiver.SetRemoteSSRC(kSenderSsrc);
+
+ RtcpReceiverLogMessage receiver_log;
+
+ for (int j = 0; j < 100; ++j) {
+ RtcpReceiverFrameLogMessage frame_log(kRtpTimestamp);
+ RtcpReceiverEventLogMessage event_log;
+ event_log.type = kAckSent;
+ event_log.event_timestamp = testing_clock.NowTicks();
+ event_log.delay_delta = base::TimeDelta::FromMilliseconds(kDelayDeltaMs);
+ frame_log.event_log_messages_.push_back(event_log);
+ receiver_log.push_back(frame_log);
+ testing_clock.Advance(base::TimeDelta::FromMilliseconds(kTimeDelayMs));
+ }
+
+ cast_log_verification.SetExpectedReceiverLog(receiver_log);
+
+ TestRtcpPacketBuilder p;
+ p.AddRr(kSenderSsrc, 1);
+ p.AddRb(kSourceSsrc);
+ p.AddReceiverLog(kSenderSsrc);
+ for (int i = 0; i < 100; ++i) {
+ p.AddReceiverFrameLog(kRtpTimestamp, 1, kTimeBaseMs + i * kTimeDelayMs);
+ p.AddReceiverEventLog(kDelayDeltaMs, 1, 0);
+ }
+
+ EXPECT_CALL(mock_rtt_feedback_,
+ OnReceivedDelaySinceLastReport(kSourceSsrc, kLastSr, kDelayLastSr)).
+ Times(1);
+
+ RtcpParser rtcp_parser(p.Packet(), p.Length());
+ rtcp_receiver.IncomingRtcpPacket(&rtcp_parser);
+
+ EXPECT_TRUE(cast_log_verification.OnReceivedReceiverLogCalled());
+}
+
+
+
} // namespace cast
} // namespace media
diff --git a/chromium/media/cast/rtcp/rtcp_sender.cc b/chromium/media/cast/rtcp/rtcp_sender.cc
index 89ea05e0531..b5cf4ce4ced 100644
--- a/chromium/media/cast/rtcp/rtcp_sender.cc
+++ b/chromium/media/cast/rtcp/rtcp_sender.cc
@@ -7,69 +7,203 @@
#include <algorithm>
#include <vector>
-#include "base/debug/trace_event.h"
#include "base/logging.h"
-#include "media/cast/pacing/paced_sender.h"
+#include "media/cast/cast_environment.h"
+#include "media/cast/net/pacing/paced_sender.h"
#include "media/cast/rtcp/rtcp_utility.h"
#include "net/base/big_endian.h"
+static const size_t kRtcpCastLogHeaderSize = 12;
+static const size_t kRtcpSenderFrameLogSize = 4;
+static const size_t kRtcpReceiverFrameLogSize = 8;
+static const size_t kRtcpReceiverEventLogSize = 4;
+
+namespace {
+uint16 MergeEventTypeAndTimestampForWireFormat(
+ const media::cast::CastLoggingEvent& event,
+ const base::TimeDelta& time_delta) {
+ int64 time_delta_ms = time_delta.InMilliseconds();
+ // Max delta is 4096 milliseconds.
+ DCHECK_GE(GG_INT64_C(0xfff), time_delta_ms);
+
+ uint16 event_type_and_timestamp_delta =
+ static_cast<uint16>(time_delta_ms & 0xfff);
+
+ uint16 event_type = 0;
+ switch (event) {
+ case media::cast::kAckSent:
+ event_type = 1;
+ break;
+ case media::cast::kAudioPlayoutDelay:
+ event_type = 2;
+ break;
+ case media::cast::kAudioFrameDecoded:
+ event_type = 3;
+ break;
+ case media::cast::kVideoFrameDecoded:
+ event_type = 4;
+ break;
+ case media::cast::kVideoRenderDelay:
+ event_type = 5;
+ break;
+ case media::cast::kPacketReceived:
+ event_type = 6;
+ break;
+ default:
+ NOTREACHED();
+ }
+ DCHECK(!(event_type & 0xfff0));
+ return (event_type << 12) + event_type_and_timestamp_delta;
+}
+
+bool ScanRtcpReceiverLogMessage(
+ const media::cast::RtcpReceiverLogMessage& receiver_log_message,
+ size_t start_size,
+ size_t* number_of_frames,
+ size_t* total_number_of_messages_to_send,
+ size_t* rtcp_log_size) {
+ if (receiver_log_message.empty()) return false;
+
+ size_t remaining_space = media::cast::kIpPacketSize - start_size;
+
+ // We must have space for at least one message
+ DCHECK_GE(remaining_space, kRtcpCastLogHeaderSize +
+ kRtcpReceiverFrameLogSize + kRtcpReceiverEventLogSize)
+ << "Not enough buffer space";
+
+ if (remaining_space < kRtcpCastLogHeaderSize + kRtcpReceiverFrameLogSize +
+ kRtcpReceiverEventLogSize) {
+ return false;
+ }
+ // Account for the RTCP header for an application-defined packet.
+ remaining_space -= kRtcpCastLogHeaderSize;
+
+ media::cast::RtcpReceiverLogMessage::const_iterator frame_it =
+ receiver_log_message.begin();
+ for (; frame_it != receiver_log_message.end(); ++frame_it) {
+ (*number_of_frames)++;
+
+ remaining_space -= kRtcpReceiverFrameLogSize;
+
+ size_t messages_in_frame = frame_it->event_log_messages_.size();
+ size_t remaining_space_in_messages =
+ remaining_space / kRtcpReceiverEventLogSize;
+ size_t messages_to_send = std::min(messages_in_frame,
+ remaining_space_in_messages);
+ if (messages_to_send > media::cast::kRtcpMaxReceiverLogMessages) {
+ // We can't send more than 256 messages.
+ remaining_space -= media::cast::kRtcpMaxReceiverLogMessages *
+ kRtcpReceiverEventLogSize;
+ *total_number_of_messages_to_send +=
+ media::cast::kRtcpMaxReceiverLogMessages;
+ break;
+ }
+ remaining_space -= messages_to_send * kRtcpReceiverEventLogSize;
+ *total_number_of_messages_to_send += messages_to_send;
+
+ if (remaining_space <
+ kRtcpReceiverFrameLogSize + kRtcpReceiverEventLogSize) {
+ // Make sure that we have room for at least one more message.
+ break;
+ }
+ }
+ *rtcp_log_size = kRtcpCastLogHeaderSize +
+ *number_of_frames * kRtcpReceiverFrameLogSize +
+ *total_number_of_messages_to_send * kRtcpReceiverEventLogSize;
+ DCHECK_GE(media::cast::kIpPacketSize,
+ start_size + *rtcp_log_size) << "Not enough buffer space";
+
+ VLOG(1) << "number of frames " << *number_of_frames;
+ VLOG(1) << "total messages to send " << *total_number_of_messages_to_send;
+ VLOG(1) << "rtcp log size " << *rtcp_log_size;
+ return true;
+}
+} // namespace
+
namespace media {
namespace cast {
-static const int kRtcpMaxNackFields = 253;
-static const int kRtcpMaxCastLossFields = 100;
-
-RtcpSender::RtcpSender(PacedPacketSender* outgoing_transport,
+RtcpSender::RtcpSender(scoped_refptr<CastEnvironment> cast_environment,
+ PacedPacketSender* outgoing_transport,
uint32 sending_ssrc,
const std::string& c_name)
: ssrc_(sending_ssrc),
c_name_(c_name),
- transport_(outgoing_transport) {
+ transport_(outgoing_transport),
+ cast_environment_(cast_environment) {
DCHECK_LT(c_name_.length(), kRtcpCnameSize) << "Invalid config";
}
RtcpSender::~RtcpSender() {}
-void RtcpSender::SendRtcp(uint32 packet_type_flags,
- const RtcpSenderInfo* sender_info,
- const RtcpReportBlock* report_block,
- uint32 pli_remote_ssrc,
- const RtcpDlrrReportBlock* dlrr,
- const RtcpReceiverReferenceTimeReport* rrtr,
- const RtcpCastMessage* cast_message) {
+void RtcpSender::SendRtcpFromRtpSender(uint32 packet_type_flags,
+ const RtcpSenderInfo* sender_info,
+ const RtcpDlrrReportBlock* dlrr,
+ RtcpSenderLogMessage* sender_log) {
+ if (packet_type_flags & kRtcpRr ||
+ packet_type_flags & kRtcpPli ||
+ packet_type_flags & kRtcpRrtr ||
+ packet_type_flags & kRtcpCast ||
+ packet_type_flags & kRtcpReceiverLog ||
+ packet_type_flags & kRtcpRpsi ||
+ packet_type_flags & kRtcpRemb ||
+ packet_type_flags & kRtcpNack) {
+ NOTREACHED() << "Invalid argument";
+ }
+
std::vector<uint8> packet;
packet.reserve(kIpPacketSize);
if (packet_type_flags & kRtcpSr) {
DCHECK(sender_info) << "Invalid argument";
- BuildSR(*sender_info, report_block, &packet);
+ BuildSR(*sender_info, NULL, &packet);
BuildSdec(&packet);
- } else if (packet_type_flags & kRtcpRr) {
- BuildRR(report_block, &packet);
- if (!c_name_.empty()) {
- BuildSdec(&packet);
- }
- }
- if (packet_type_flags & kRtcpPli) {
- BuildPli(pli_remote_ssrc, &packet);
}
if (packet_type_flags & kRtcpBye) {
BuildBye(&packet);
}
- if (packet_type_flags & kRtcpRpsi) {
- // Implement this for webrtc interop.
- NOTIMPLEMENTED();
+ if (packet_type_flags & kRtcpDlrr) {
+ DCHECK(dlrr) << "Invalid argument";
+ BuildDlrrRb(dlrr, &packet);
}
- if (packet_type_flags & kRtcpRemb) {
- // Implement this for webrtc interop.
- NOTIMPLEMENTED();
+ if (packet_type_flags & kRtcpSenderLog) {
+ DCHECK(sender_log) << "Invalid argument";
+ BuildSenderLog(sender_log, &packet);
+ }
+ if (packet.empty())
+    return;  // Sanity check: don't send empty packets.
+
+ transport_->SendRtcpPacket(packet);
+}
+
+void RtcpSender::SendRtcpFromRtpReceiver(
+ uint32 packet_type_flags,
+ const RtcpReportBlock* report_block,
+ const RtcpReceiverReferenceTimeReport* rrtr,
+ const RtcpCastMessage* cast_message,
+ RtcpReceiverLogMessage* receiver_log) {
+ if (packet_type_flags & kRtcpSr ||
+ packet_type_flags & kRtcpDlrr ||
+ packet_type_flags & kRtcpSenderLog) {
+ NOTREACHED() << "Invalid argument";
}
- if (packet_type_flags & kRtcpNack) {
- // Implement this for webrtc interop.
+ if (packet_type_flags & kRtcpPli ||
+ packet_type_flags & kRtcpRpsi ||
+ packet_type_flags & kRtcpRemb ||
+ packet_type_flags & kRtcpNack) {
+ // Implement these for webrtc interop.
NOTIMPLEMENTED();
}
- if (packet_type_flags & kRtcpDlrr) {
- DCHECK(dlrr) << "Invalid argument";
- BuildDlrrRb(dlrr, &packet);
+ std::vector<uint8> packet;
+ packet.reserve(kIpPacketSize);
+
+ if (packet_type_flags & kRtcpRr) {
+ BuildRR(report_block, &packet);
+ if (!c_name_.empty()) {
+ BuildSdec(&packet);
+ }
+ }
+ if (packet_type_flags & kRtcpBye) {
+ BuildBye(&packet);
}
if (packet_type_flags & kRtcpRrtr) {
DCHECK(rrtr) << "Invalid argument";
@@ -79,7 +213,10 @@ void RtcpSender::SendRtcp(uint32 packet_type_flags,
DCHECK(cast_message) << "Invalid argument";
BuildCast(cast_message, &packet);
}
-
+ if (packet_type_flags & kRtcpReceiverLog) {
+ DCHECK(receiver_log) << "Invalid argument";
+ BuildReceiverLog(receiver_log, &packet);
+ }
if (packet.empty()) return; // Sanity don't send empty packets.
transport_->SendRtcpPacket(packet);
@@ -98,14 +235,14 @@ void RtcpSender::BuildSR(const RtcpSenderInfo& sender_info,
net::BigEndianWriter big_endian_writer(&((*packet)[start_size]), 28);
big_endian_writer.WriteU8(0x80 + (report_block ? 1 : 0));
- big_endian_writer.WriteU8(200);
+ big_endian_writer.WriteU8(kPacketTypeSenderReport);
big_endian_writer.WriteU16(number_of_rows);
big_endian_writer.WriteU32(ssrc_);
big_endian_writer.WriteU32(sender_info.ntp_seconds);
big_endian_writer.WriteU32(sender_info.ntp_fraction);
big_endian_writer.WriteU32(sender_info.rtp_timestamp);
big_endian_writer.WriteU32(sender_info.send_packet_count);
- big_endian_writer.WriteU32(sender_info.send_octet_count);
+ big_endian_writer.WriteU32(static_cast<uint32>(sender_info.send_octet_count));
if (report_block) {
AddReportBlocks(*report_block, packet); // Adds 24 bytes.
@@ -123,7 +260,7 @@ void RtcpSender::BuildRR(const RtcpReportBlock* report_block,
net::BigEndianWriter big_endian_writer(&((*packet)[start_size]), 8);
big_endian_writer.WriteU8(0x80 + (report_block ? 1 : 0));
- big_endian_writer.WriteU8(201);
+ big_endian_writer.WriteU8(kPacketTypeReceiverReport);
big_endian_writer.WriteU16(number_of_rows);
big_endian_writer.WriteU32(ssrc_);
@@ -172,10 +309,10 @@ void RtcpSender::BuildSdec(std::vector<uint8>* packet) const {
net::BigEndianWriter big_endian_writer(&((*packet)[start_size]), 10);
// We always need to add one SDES CNAME.
big_endian_writer.WriteU8(0x80 + 1);
- big_endian_writer.WriteU8(202);
+ big_endian_writer.WriteU8(kPacketTypeSdes);
// Handle SDES length later on.
- uint32 sdes_length_position = start_size + 3;
+ uint32 sdes_length_position = static_cast<uint32>(start_size) + 3;
big_endian_writer.WriteU16(0);
big_endian_writer.WriteU32(ssrc_); // Add our own SSRC.
big_endian_writer.WriteU8(1); // CNAME = 1
@@ -199,7 +336,7 @@ void RtcpSender::BuildSdec(std::vector<uint8>* packet) const {
sdes_length += padding;
// In 32-bit words minus one and we don't count the header.
- uint8 buffer_length = (sdes_length / 4) - 1;
+ uint8 buffer_length = static_cast<uint8>((sdes_length / 4) - 1);
(*packet)[sdes_length_position] = buffer_length;
}
@@ -214,13 +351,10 @@ void RtcpSender::BuildPli(uint32 remote_ssrc,
net::BigEndianWriter big_endian_writer(&((*packet)[start_size]), 12);
uint8 FMT = 1; // Picture loss indicator.
big_endian_writer.WriteU8(0x80 + FMT);
- big_endian_writer.WriteU8(206);
+ big_endian_writer.WriteU8(kPacketTypePayloadSpecific);
big_endian_writer.WriteU16(2); // Used fixed length of 2.
big_endian_writer.WriteU32(ssrc_); // Add our own SSRC.
big_endian_writer.WriteU32(remote_ssrc); // Add the remote SSRC.
- TRACE_EVENT_INSTANT2("cast_rtcp", "RtcpSender::PLI", TRACE_EVENT_SCOPE_THREAD,
- "remote_ssrc", remote_ssrc,
- "ssrc", ssrc_);
}
/*
@@ -243,7 +377,7 @@ void RtcpSender::BuildRpsi(const RtcpRpsiMessage* rpsi,
net::BigEndianWriter big_endian_writer(&((*packet)[start_size]), 24);
uint8 FMT = 3; // Reference Picture Selection Indication.
big_endian_writer.WriteU8(0x80 + FMT);
- big_endian_writer.WriteU8(206);
+ big_endian_writer.WriteU8(kPacketTypePayloadSpecific);
// Calculate length.
uint32 bits_required = 7;
@@ -300,13 +434,13 @@ void RtcpSender::BuildRemb(const RtcpRembMessage* remb,
// Add application layer feedback.
uint8 FMT = 15;
big_endian_writer.WriteU8(0x80 + FMT);
- big_endian_writer.WriteU8(206);
+ big_endian_writer.WriteU8(kPacketTypePayloadSpecific);
big_endian_writer.WriteU8(0);
- big_endian_writer.WriteU8(remb->remb_ssrcs.size() + 4);
+ big_endian_writer.WriteU8(static_cast<uint8>(remb->remb_ssrcs.size() + 4));
big_endian_writer.WriteU32(ssrc_); // Add our own SSRC.
big_endian_writer.WriteU32(0); // Remote SSRC must be 0.
big_endian_writer.WriteU32(kRemb);
- big_endian_writer.WriteU8(remb->remb_ssrcs.size());
+ big_endian_writer.WriteU8(static_cast<uint8>(remb->remb_ssrcs.size()));
// 6 bit exponent and a 18 bit mantissa.
uint8 bitrate_exponent;
@@ -324,8 +458,8 @@ void RtcpSender::BuildRemb(const RtcpRembMessage* remb,
for (; it != remb->remb_ssrcs.end(); ++it) {
big_endian_writer.WriteU32(*it);
}
- TRACE_COUNTER_ID1("cast_rtcp", "RtcpSender::RembBitrate", ssrc_,
- remb->remb_bitrate);
+ cast_environment_->Logging()->InsertGenericEvent(kRembBitrate,
+ remb->remb_bitrate);
}
void RtcpSender::BuildNack(const RtcpNackMessage* nack,
@@ -340,18 +474,18 @@ void RtcpSender::BuildNack(const RtcpNackMessage* nack,
uint8 FMT = 1;
big_endian_writer.WriteU8(0x80 + FMT);
- big_endian_writer.WriteU8(205);
+ big_endian_writer.WriteU8(kPacketTypeGenericRtpFeedback);
big_endian_writer.WriteU8(0);
- int nack_size_pos = start_size + 3;
+ size_t nack_size_pos = start_size + 3;
big_endian_writer.WriteU8(3);
big_endian_writer.WriteU32(ssrc_); // Add our own SSRC.
big_endian_writer.WriteU32(nack->remote_ssrc); // Add the remote SSRC.
// Build NACK bitmasks and write them to the Rtcp message.
// The nack list should be sorted and not contain duplicates.
- int number_of_nack_fields = 0;
- int max_number_of_nack_fields =
- std::min<int>(kRtcpMaxNackFields, (kIpPacketSize - packet->size()) / 4);
+ size_t number_of_nack_fields = 0;
+ size_t max_number_of_nack_fields = std::min<size_t>(kRtcpMaxNackFields,
+ (kIpPacketSize - packet->size()) / 4);
std::list<uint16>::const_iterator it = nack->nack_list.begin();
while (it != nack->nack_list.end() &&
@@ -379,9 +513,8 @@ void RtcpSender::BuildNack(const RtcpNackMessage* nack,
big_endian_nack_writer.WriteU16(bitmask);
number_of_nack_fields++;
}
+ DCHECK_GE(kRtcpMaxNackFields, number_of_nack_fields);
(*packet)[nack_size_pos] = static_cast<uint8>(2 + number_of_nack_fields);
- TRACE_COUNTER_ID1("cast_rtcp", "RtcpSender::NACK", ssrc_,
- nack->nack_list.size());
}
void RtcpSender::BuildBye(std::vector<uint8>* packet) const {
@@ -393,7 +526,7 @@ void RtcpSender::BuildBye(std::vector<uint8>* packet) const {
net::BigEndianWriter big_endian_writer(&((*packet)[start_size]), 8);
big_endian_writer.WriteU8(0x80 + 1);
- big_endian_writer.WriteU8(203);
+ big_endian_writer.WriteU8(kPacketTypeBye);
big_endian_writer.WriteU16(1); // Length.
big_endian_writer.WriteU32(ssrc_); // Add our own SSRC.
}
@@ -425,7 +558,7 @@ void RtcpSender::BuildDlrrRb(const RtcpDlrrReportBlock* dlrr,
net::BigEndianWriter big_endian_writer(&((*packet)[start_size]), 24);
big_endian_writer.WriteU8(0x80);
- big_endian_writer.WriteU8(207);
+ big_endian_writer.WriteU8(kPacketTypeXr);
big_endian_writer.WriteU16(5); // Length.
big_endian_writer.WriteU32(ssrc_); // Add our own SSRC.
big_endian_writer.WriteU8(5); // Add block type.
@@ -447,7 +580,7 @@ void RtcpSender::BuildRrtr(const RtcpReceiverReferenceTimeReport* rrtr,
net::BigEndianWriter big_endian_writer(&((*packet)[start_size]), 20);
big_endian_writer.WriteU8(0x80);
- big_endian_writer.WriteU8(207);
+ big_endian_writer.WriteU8(kPacketTypeXr);
big_endian_writer.WriteU16(4); // Length.
big_endian_writer.WriteU32(ssrc_); // Add our own SSRC.
big_endian_writer.WriteU8(4); // Add block type.
@@ -470,24 +603,24 @@ void RtcpSender::BuildCast(const RtcpCastMessage* cast,
net::BigEndianWriter big_endian_writer(&((*packet)[start_size]), 20);
uint8 FMT = 15; // Application layer feedback.
big_endian_writer.WriteU8(0x80 + FMT);
- big_endian_writer.WriteU8(206);
+ big_endian_writer.WriteU8(kPacketTypePayloadSpecific);
big_endian_writer.WriteU8(0);
- int cast_size_pos = start_size + 3; // Save length position.
+ size_t cast_size_pos = start_size + 3; // Save length position.
big_endian_writer.WriteU8(4);
big_endian_writer.WriteU32(ssrc_); // Add our own SSRC.
big_endian_writer.WriteU32(cast->media_ssrc_); // Remote SSRC.
big_endian_writer.WriteU32(kCast);
- big_endian_writer.WriteU8(cast->ack_frame_id_);
- int cast_loss_field_pos = start_size + 17; // Save loss field position.
+ big_endian_writer.WriteU8(static_cast<uint8>(cast->ack_frame_id_));
+ size_t cast_loss_field_pos = start_size + 17; // Save loss field position.
big_endian_writer.WriteU8(0); // Overwritten with number_of_loss_fields.
big_endian_writer.WriteU8(0); // Reserved.
big_endian_writer.WriteU8(0); // Reserved.
- int number_of_loss_fields = 0;
- int max_number_of_loss_fields = std::min<int>(kRtcpMaxCastLossFields,
+ size_t number_of_loss_fields = 0;
+ size_t max_number_of_loss_fields = std::min<size_t>(kRtcpMaxCastLossFields,
(kIpPacketSize - packet->size()) / 4);
- std::map<uint8, std::set<uint16> >::const_iterator frame_it =
+ MissingFramesAndPacketsMap::const_iterator frame_it =
cast->missing_frames_and_packets_.begin();
for (; frame_it != cast->missing_frames_and_packets_.end() &&
@@ -498,12 +631,12 @@ void RtcpSender::BuildCast(const RtcpCastMessage* cast,
start_size = packet->size();
packet->resize(start_size + 4);
net::BigEndianWriter big_endian_nack_writer(&((*packet)[start_size]), 4);
- big_endian_nack_writer.WriteU8(frame_it->first);
+ big_endian_nack_writer.WriteU8(static_cast<uint8>(frame_it->first));
big_endian_nack_writer.WriteU16(kRtcpCastAllPacketsLost);
big_endian_nack_writer.WriteU8(0);
++number_of_loss_fields;
} else {
- std::set<uint16>::const_iterator packet_it = frame_it->second.begin();
+ PacketIdSet::const_iterator packet_it = frame_it->second.begin();
while (packet_it != frame_it->second.end()) {
uint16 packet_id = *packet_it;
@@ -513,7 +646,7 @@ void RtcpSender::BuildCast(const RtcpCastMessage* cast,
&((*packet)[start_size]), 4);
// Write frame and packet id to buffer before calculating bitmask.
- big_endian_nack_writer.WriteU8(frame_it->first);
+ big_endian_nack_writer.WriteU8(static_cast<uint8>(frame_it->first));
big_endian_nack_writer.WriteU16(packet_id);
uint8 bitmask = 0;
@@ -532,12 +665,134 @@ void RtcpSender::BuildCast(const RtcpCastMessage* cast,
}
}
}
+ DCHECK_LE(number_of_loss_fields, kRtcpMaxCastLossFields);
(*packet)[cast_size_pos] = static_cast<uint8>(4 + number_of_loss_fields);
(*packet)[cast_loss_field_pos] = static_cast<uint8>(number_of_loss_fields);
+}
- // Frames with missing packets.
- TRACE_COUNTER_ID1("cast_rtcp", "RtcpSender::CastNACK", ssrc_,
- cast->missing_frames_and_packets_.size());
+void RtcpSender::BuildSenderLog(RtcpSenderLogMessage* sender_log_message,
+ std::vector<uint8>* packet) const {
+ DCHECK(sender_log_message);
+ DCHECK(packet);
+ size_t start_size = packet->size();
+ size_t remaining_space = kIpPacketSize - start_size;
+ DCHECK_GE(remaining_space, kRtcpCastLogHeaderSize + kRtcpSenderFrameLogSize)
+ << "Not enough buffer space";
+ if (remaining_space < kRtcpCastLogHeaderSize + kRtcpSenderFrameLogSize)
+ return;
+
+ size_t space_for_x_messages =
+ (remaining_space - kRtcpCastLogHeaderSize) / kRtcpSenderFrameLogSize;
+ size_t number_of_messages = std::min(space_for_x_messages,
+ sender_log_message->size());
+
+ size_t log_size = kRtcpCastLogHeaderSize +
+ number_of_messages * kRtcpSenderFrameLogSize;
+ packet->resize(start_size + log_size);
+
+ net::BigEndianWriter big_endian_writer(&((*packet)[start_size]), log_size);
+ big_endian_writer.WriteU8(0x80 + kSenderLogSubtype);
+ big_endian_writer.WriteU8(kPacketTypeApplicationDefined);
+ big_endian_writer.WriteU16(static_cast<uint16>(2 + number_of_messages));
+ big_endian_writer.WriteU32(ssrc_); // Add our own SSRC.
+ big_endian_writer.WriteU32(kCast);
+
+ for (; number_of_messages > 0; --number_of_messages) {
+ DCHECK(!sender_log_message->empty());
+ const RtcpSenderFrameLogMessage& message = sender_log_message->front();
+ big_endian_writer.WriteU8(static_cast<uint8>(message.frame_status));
+    // We send the 24 least significant bits of the RTP timestamp.
+ big_endian_writer.WriteU8(static_cast<uint8>(message.rtp_timestamp >> 16));
+ big_endian_writer.WriteU8(static_cast<uint8>(message.rtp_timestamp >> 8));
+ big_endian_writer.WriteU8(static_cast<uint8>(message.rtp_timestamp));
+ sender_log_message->pop_front();
+ }
+}
+
+void RtcpSender::BuildReceiverLog(RtcpReceiverLogMessage* receiver_log_message,
+ std::vector<uint8>* packet) const {
+ DCHECK(receiver_log_message);
+ const size_t packet_start_size = packet->size();
+ size_t number_of_frames = 0;
+ size_t total_number_of_messages_to_send = 0;
+ size_t rtcp_log_size = 0;
+
+ if (!ScanRtcpReceiverLogMessage(*receiver_log_message,
+ packet_start_size,
+ &number_of_frames,
+ &total_number_of_messages_to_send,
+ &rtcp_log_size)) {
+ return;
+ }
+ packet->resize(packet_start_size + rtcp_log_size);
+
+ net::BigEndianWriter big_endian_writer(&((*packet)[packet_start_size]),
+ rtcp_log_size);
+ big_endian_writer.WriteU8(0x80 + kReceiverLogSubtype);
+ big_endian_writer.WriteU8(kPacketTypeApplicationDefined);
+ big_endian_writer.WriteU16(static_cast<uint16>(2 + 2 * number_of_frames +
+ total_number_of_messages_to_send));
+ big_endian_writer.WriteU32(ssrc_); // Add our own SSRC.
+ big_endian_writer.WriteU32(kCast);
+
+ while (!receiver_log_message->empty() &&
+ total_number_of_messages_to_send > 0) {
+ RtcpReceiverFrameLogMessage& frame_log_messages =
+ receiver_log_message->front();
+ // Add our frame header.
+ big_endian_writer.WriteU32(frame_log_messages.rtp_timestamp_);
+ size_t messages_in_frame = frame_log_messages.event_log_messages_.size();
+ if (messages_in_frame > total_number_of_messages_to_send) {
+ // We are running out of space.
+ messages_in_frame = total_number_of_messages_to_send;
+ }
+ // Keep track of how many messages we have left to send.
+ total_number_of_messages_to_send -= messages_in_frame;
+
+ // On the wire format is number of messages - 1.
+ big_endian_writer.WriteU8(static_cast<uint8>(messages_in_frame - 1));
+
+ base::TimeTicks event_timestamp_base =
+ frame_log_messages.event_log_messages_.front().event_timestamp;
+ uint32 base_timestamp_ms =
+ (event_timestamp_base - base::TimeTicks()).InMilliseconds();
+ big_endian_writer.WriteU8(static_cast<uint8>(base_timestamp_ms >> 16));
+ big_endian_writer.WriteU8(static_cast<uint8>(base_timestamp_ms >> 8));
+ big_endian_writer.WriteU8(static_cast<uint8>(base_timestamp_ms));
+
+ while (!frame_log_messages.event_log_messages_.empty() &&
+ messages_in_frame > 0) {
+ const RtcpReceiverEventLogMessage& event_message =
+ frame_log_messages.event_log_messages_.front();
+ uint16 event_type_and_timestamp_delta =
+ MergeEventTypeAndTimestampForWireFormat(event_message.type,
+ event_message.event_timestamp - event_timestamp_base);
+ switch (event_message.type) {
+ case kAckSent:
+ case kAudioPlayoutDelay:
+ case kAudioFrameDecoded:
+ case kVideoFrameDecoded:
+ case kVideoRenderDelay:
+ big_endian_writer.WriteU16(static_cast<uint16>(
+ event_message.delay_delta.InMilliseconds()));
+ big_endian_writer.WriteU16(event_type_and_timestamp_delta);
+ break;
+ case kPacketReceived:
+ big_endian_writer.WriteU16(event_message.packet_id);
+ big_endian_writer.WriteU16(event_type_and_timestamp_delta);
+ break;
+ default:
+ NOTREACHED();
+ }
+ messages_in_frame--;
+ frame_log_messages.event_log_messages_.pop_front();
+ }
+ if (frame_log_messages.event_log_messages_.empty()) {
+ // We sent all messages on this frame; pop the frame header.
+ receiver_log_message->pop_front();
+ }
+ }
+ DCHECK_EQ(total_number_of_messages_to_send, 0);
}
} // namespace cast
diff --git a/chromium/media/cast/rtcp/rtcp_sender.h b/chromium/media/cast/rtcp/rtcp_sender.h
index 7dbbc0f95b5..e931c693c0f 100644
--- a/chromium/media/cast/rtcp/rtcp_sender.h
+++ b/chromium/media/cast/rtcp/rtcp_sender.h
@@ -18,19 +18,23 @@ namespace cast {
class RtcpSender {
public:
- RtcpSender(PacedPacketSender* const paced_packet_sender,
+ RtcpSender(scoped_refptr<CastEnvironment> cast_environment,
+ PacedPacketSender* const paced_packet_sender,
uint32 sending_ssrc,
const std::string& c_name);
virtual ~RtcpSender();
- void SendRtcp(uint32 packet_type_flags,
- const RtcpSenderInfo* sender_info,
- const RtcpReportBlock* report_block,
- uint32 pli_remote_ssrc,
- const RtcpDlrrReportBlock* dlrr,
- const RtcpReceiverReferenceTimeReport* rrtr,
- const RtcpCastMessage* cast_message);
+ void SendRtcpFromRtpSender(uint32 packet_type_flags,
+ const RtcpSenderInfo* sender_info,
+ const RtcpDlrrReportBlock* dlrr,
+ RtcpSenderLogMessage* sender_log);
+
+ void SendRtcpFromRtpReceiver(uint32 packet_type_flags,
+ const RtcpReportBlock* report_block,
+ const RtcpReceiverReferenceTimeReport* rrtr,
+ const RtcpCastMessage* cast_message,
+ RtcpReceiverLogMessage* receiver_log);
enum RtcpPacketType {
kRtcpSr = 0x0002,
@@ -45,6 +49,8 @@ class RtcpSender {
kRtcpRpsi = 0x8000,
kRtcpRemb = 0x10000,
kRtcpCast = 0x20000,
+ kRtcpSenderLog = 0x40000,
+ kRtcpReceiverLog = 0x80000,
};
private:
@@ -83,6 +89,12 @@ class RtcpSender {
void BuildCast(const RtcpCastMessage* cast_message,
std::vector<uint8>* packet) const;
+ void BuildSenderLog(RtcpSenderLogMessage* sender_log_message,
+ std::vector<uint8>* packet) const;
+
+ void BuildReceiverLog(RtcpReceiverLogMessage* receiver_log_message,
+ std::vector<uint8>* packet) const;
+
inline void BitrateToRembExponentBitrate(uint32 bitrate,
uint8* exponent,
uint32* mantissa) const {
@@ -102,6 +114,7 @@ class RtcpSender {
// Not owned by this class.
PacedPacketSender* transport_;
+ scoped_refptr<CastEnvironment> cast_environment_;
DISALLOW_COPY_AND_ASSIGN(RtcpSender);
};
diff --git a/chromium/media/cast/rtcp/rtcp_sender_unittest.cc b/chromium/media/cast/rtcp/rtcp_sender_unittest.cc
index b7daf37719f..16e9ee18ffb 100644
--- a/chromium/media/cast/rtcp/rtcp_sender_unittest.cc
+++ b/chromium/media/cast/rtcp/rtcp_sender_unittest.cc
@@ -3,19 +3,24 @@
// found in the LICENSE file.
#include "base/memory/scoped_ptr.h"
+#include "base/test/simple_test_tick_clock.h"
#include "media/cast/cast_defines.h"
-#include "media/cast/pacing/paced_sender.h"
+#include "media/cast/cast_environment.h"
+#include "media/cast/net/pacing/paced_sender.h"
#include "media/cast/rtcp/rtcp_sender.h"
+#include "media/cast/rtcp/rtcp_utility.h"
#include "media/cast/rtcp/test_rtcp_packet_builder.h"
+#include "media/cast/test/fake_task_runner.h"
#include "testing/gmock/include/gmock/gmock.h"
namespace media {
namespace cast {
-static const int kRtcpInterval = 1000;
+namespace {
static const uint32 kSendingSsrc = 0x12345678;
static const uint32 kMediaSsrc = 0x87654321;
static const std::string kCName("test@10.1.1.1");
+} // namespace
class TestRtcpTransport : public PacedPacketSender {
public:
@@ -24,29 +29,27 @@ class TestRtcpTransport : public PacedPacketSender {
packet_count_(0) {
}
- virtual bool SendRtcpPacket(const std::vector<uint8>& packet) OVERRIDE {
+ virtual bool SendRtcpPacket(const Packet& packet) OVERRIDE {
EXPECT_EQ(expected_packet_length_, packet.size());
EXPECT_EQ(0, memcmp(expected_packet_, &(packet[0]), packet.size()));
packet_count_++;
return true;
}
- virtual bool SendPacket(const std::vector<uint8>& packet,
- int num_of_packets) {
+ virtual bool SendPackets(const PacketList& packets) OVERRIDE {
return false;
}
- virtual bool ResendPacket(const std::vector<uint8>& packet,
- int num_of_packets) {
+ virtual bool ResendPackets(const PacketList& packets) OVERRIDE {
return false;
}
- void SetExpectedRtcpPacket(const uint8* rtcp_buffer, int length) {
+ void SetExpectedRtcpPacket(const uint8* rtcp_buffer, size_t length) {
expected_packet_length_ = length;
memcpy(expected_packet_, rtcp_buffer, length);
}
- int packet_count() { return packet_count_; }
+ int packet_count() const { return packet_count_; }
private:
uint8 expected_packet_[kIpPacketSize];
@@ -57,12 +60,20 @@ class TestRtcpTransport : public PacedPacketSender {
class RtcpSenderTest : public ::testing::Test {
protected:
RtcpSenderTest()
- : rtcp_sender_(new RtcpSender(&test_transport_,
+ : task_runner_(new test::FakeTaskRunner(&testing_clock_)),
+ cast_environment_(new CastEnvironment(&testing_clock_, task_runner_,
+ task_runner_, task_runner_, task_runner_, task_runner_,
+ GetDefaultCastLoggingConfig())),
+ rtcp_sender_(new RtcpSender(cast_environment_,
+ &test_transport_,
kSendingSsrc,
kCName)) {
}
+ base::SimpleTestTickClock testing_clock_;
TestRtcpTransport test_transport_;
+ scoped_refptr<test::FakeTaskRunner> task_runner_;
+ scoped_refptr<CastEnvironment> cast_environment_;
scoped_ptr<RtcpSender> rtcp_sender_;
};
@@ -80,13 +91,10 @@ TEST_F(RtcpSenderTest, RtcpSenderReport) {
p.AddSdesCname(kSendingSsrc, kCName);
test_transport_.SetExpectedRtcpPacket(p.Packet(), p.Length());
- rtcp_sender_->SendRtcp(RtcpSender::kRtcpSr,
- &sender_info,
- NULL,
- 0,
- NULL,
- NULL,
- NULL);
+ rtcp_sender_->SendRtcpFromRtpSender(RtcpSender::kRtcpSr,
+ &sender_info,
+ NULL,
+ NULL);
EXPECT_EQ(1, test_transport_.packet_count());
}
@@ -98,13 +106,8 @@ TEST_F(RtcpSenderTest, RtcpReceiverReport) {
p1.AddSdesCname(kSendingSsrc, kCName);
test_transport_.SetExpectedRtcpPacket(p1.Packet(), p1.Length());
- rtcp_sender_->SendRtcp(RtcpSender::kRtcpRr,
- NULL,
- NULL,
- 0,
- NULL,
- NULL,
- NULL);
+ rtcp_sender_->SendRtcpFromRtpReceiver(RtcpSender::kRtcpRr,
+ NULL, NULL, NULL, NULL);
EXPECT_EQ(1, test_transport_.packet_count());
@@ -121,19 +124,13 @@ TEST_F(RtcpSenderTest, RtcpReceiverReport) {
report_block.media_ssrc = kMediaSsrc; // SSRC of the RTP packet sender.
report_block.fraction_lost = kLoss >> 24;
report_block.cumulative_lost = kLoss; // 24 bits valid.
- report_block.extended_high_sequence_number =
- kExtendedMax;
- report_block.jitter = kJitter;
+ report_block.extended_high_sequence_number = kExtendedMax;
+ report_block.jitter = kTestJitter;
report_block.last_sr = kLastSr;
report_block.delay_since_last_sr = kDelayLastSr;
- rtcp_sender_->SendRtcp(RtcpSender::kRtcpRr,
- NULL,
- &report_block,
- 0,
- NULL,
- NULL,
- NULL);
+ rtcp_sender_->SendRtcpFromRtpReceiver(RtcpSender::kRtcpRr, &report_block,
+ NULL, NULL, NULL);
EXPECT_EQ(2, test_transport_.packet_count());
}
@@ -158,15 +155,92 @@ TEST_F(RtcpSenderTest, RtcpSenderReportWithDlrr) {
dlrr_rb.last_rr = kLastRr;
dlrr_rb.delay_since_last_rr = kDelayLastRr;
- rtcp_sender_->SendRtcp(RtcpSender::kRtcpSr | RtcpSender::kRtcpDlrr,
- &sender_info,
- NULL,
- 0,
- &dlrr_rb,
- NULL,
- NULL);
+ rtcp_sender_->SendRtcpFromRtpSender(
+ RtcpSender::kRtcpSr | RtcpSender::kRtcpDlrr,
+ &sender_info,
+ &dlrr_rb,
+ NULL);
+
+ EXPECT_EQ(1, test_transport_.packet_count());
+}
+
+TEST_F(RtcpSenderTest, RtcpSenderReportWithDlrrAndLog) {
+ RtcpSenderInfo sender_info;
+ sender_info.ntp_seconds = kNtpHigh;
+ sender_info.ntp_fraction = kNtpLow;
+ sender_info.rtp_timestamp = kRtpTimestamp;
+ sender_info.send_packet_count = kSendPacketCount;
+ sender_info.send_octet_count = kSendOctetCount;
+
+ // Sender report + c_name + dlrr + sender log.
+ TestRtcpPacketBuilder p;
+ p.AddSr(kSendingSsrc, 0);
+ p.AddSdesCname(kSendingSsrc, kCName);
+ p.AddXrHeader(kSendingSsrc);
+ p.AddXrDlrrBlock(kSendingSsrc);
+ p.AddSenderLog(kSendingSsrc);
+ p.AddSenderFrameLog(kRtcpSenderFrameStatusSentToNetwork, kRtpTimestamp);
+
+ test_transport_.SetExpectedRtcpPacket(p.Packet(), p.Length());
+
+ RtcpDlrrReportBlock dlrr_rb;
+ dlrr_rb.last_rr = kLastRr;
+ dlrr_rb.delay_since_last_rr = kDelayLastRr;
+
+ RtcpSenderFrameLogMessage sender_frame_log;
+ sender_frame_log.frame_status = kRtcpSenderFrameStatusSentToNetwork;
+ sender_frame_log.rtp_timestamp = kRtpTimestamp;
+
+ RtcpSenderLogMessage sender_log;
+ sender_log.push_back(sender_frame_log);
+
+ rtcp_sender_->SendRtcpFromRtpSender(
+ RtcpSender::kRtcpSr | RtcpSender::kRtcpDlrr | RtcpSender::kRtcpSenderLog,
+ &sender_info,
+ &dlrr_rb,
+ &sender_log);
EXPECT_EQ(1, test_transport_.packet_count());
+ EXPECT_TRUE(sender_log.empty());
+}
+
+TEST_F(RtcpSenderTest, RtcpSenderReporWithTooManyLogFrames) {
+ RtcpSenderInfo sender_info;
+ sender_info.ntp_seconds = kNtpHigh;
+ sender_info.ntp_fraction = kNtpLow;
+ sender_info.rtp_timestamp = kRtpTimestamp;
+ sender_info.send_packet_count = kSendPacketCount;
+ sender_info.send_octet_count = kSendOctetCount;
+
+ // Sender report + c_name + sender log.
+ TestRtcpPacketBuilder p;
+ p.AddSr(kSendingSsrc, 0);
+ p.AddSdesCname(kSendingSsrc, kCName);
+ p.AddSenderLog(kSendingSsrc);
+
+ for (int i = 0; i < 359; ++i) {
+ p.AddSenderFrameLog(kRtcpSenderFrameStatusSentToNetwork,
+ kRtpTimestamp + i * 90);
+ }
+ test_transport_.SetExpectedRtcpPacket(p.Packet(), p.Length());
+
+
+ RtcpSenderLogMessage sender_log;
+ for (int j = 0; j < 400; ++j) {
+ RtcpSenderFrameLogMessage sender_frame_log;
+ sender_frame_log.frame_status = kRtcpSenderFrameStatusSentToNetwork;
+ sender_frame_log.rtp_timestamp = kRtpTimestamp + j * 90;
+ sender_log.push_back(sender_frame_log);
+ }
+
+ rtcp_sender_->SendRtcpFromRtpSender(
+ RtcpSender::kRtcpSr | RtcpSender::kRtcpSenderLog,
+ &sender_info,
+ NULL,
+ &sender_log);
+
+ EXPECT_EQ(1, test_transport_.packet_count());
+ EXPECT_EQ(41u, sender_log.size());
}
TEST_F(RtcpSenderTest, RtcpReceiverReportWithRrtr) {
@@ -185,9 +259,8 @@ TEST_F(RtcpSenderTest, RtcpReceiverReportWithRrtr) {
report_block.media_ssrc = kMediaSsrc; // SSRC of the RTP packet sender.
report_block.fraction_lost = kLoss >> 24;
report_block.cumulative_lost = kLoss; // 24 bits valid.
- report_block.extended_high_sequence_number =
- kExtendedMax;
- report_block.jitter = kJitter;
+ report_block.extended_high_sequence_number = kExtendedMax;
+ report_block.jitter = kTestJitter;
report_block.last_sr = kLastSr;
report_block.delay_since_last_sr = kDelayLastSr;
@@ -195,13 +268,12 @@ TEST_F(RtcpSenderTest, RtcpReceiverReportWithRrtr) {
rrtr.ntp_seconds = kNtpHigh;
rrtr.ntp_fraction = kNtpLow;
- rtcp_sender_->SendRtcp(RtcpSender::kRtcpRr | RtcpSender::kRtcpRrtr,
- NULL,
- &report_block,
- 0,
- NULL,
- &rrtr,
- NULL);
+ rtcp_sender_->SendRtcpFromRtpReceiver(
+ RtcpSender::kRtcpRr | RtcpSender::kRtcpRrtr,
+ &report_block,
+ &rrtr,
+ NULL,
+ NULL);
EXPECT_EQ(1, test_transport_.packet_count());
}
@@ -222,15 +294,14 @@ TEST_F(RtcpSenderTest, RtcpReceiverReportWithCast) {
report_block.fraction_lost = kLoss >> 24;
report_block.cumulative_lost = kLoss; // 24 bits valid.
report_block.extended_high_sequence_number = kExtendedMax;
- report_block.jitter = kJitter;
+ report_block.jitter = kTestJitter;
report_block.last_sr = kLastSr;
report_block.delay_since_last_sr = kDelayLastSr;
RtcpCastMessage cast_message(kMediaSsrc);
cast_message.ack_frame_id_ = kAckFrameId;
- std::set<uint16_t> missing_packets;
- cast_message.missing_frames_and_packets_[
- kLostFrameId] = missing_packets;
+ PacketIdSet missing_packets;
+ cast_message.missing_frames_and_packets_[kLostFrameId] = missing_packets;
missing_packets.insert(kLostPacketId1);
missing_packets.insert(kLostPacketId2);
@@ -238,47 +309,281 @@ TEST_F(RtcpSenderTest, RtcpReceiverReportWithCast) {
cast_message.missing_frames_and_packets_[kFrameIdWithLostPackets] =
missing_packets;
- rtcp_sender_->SendRtcp(RtcpSender::kRtcpRr | RtcpSender::kRtcpCast,
- NULL,
- &report_block,
- 0,
- NULL,
- NULL,
- &cast_message);
+ rtcp_sender_->SendRtcpFromRtpReceiver(
+ RtcpSender::kRtcpRr | RtcpSender::kRtcpCast,
+ &report_block,
+ NULL,
+ &cast_message,
+ NULL);
EXPECT_EQ(1, test_transport_.packet_count());
}
-TEST_F(RtcpSenderTest, RtcpReceiverReportWithIntraFrameRequest) {
- // Receiver report with report block + c_name.
+TEST_F(RtcpSenderTest, RtcpReceiverReportWithRrtraAndCastMessage) {
+ TestRtcpPacketBuilder p;
+ p.AddRr(kSendingSsrc, 1);
+ p.AddRb(kMediaSsrc);
+ p.AddSdesCname(kSendingSsrc, kCName);
+ p.AddXrHeader(kSendingSsrc);
+ p.AddXrRrtrBlock();
+ p.AddCast(kSendingSsrc, kMediaSsrc);
+ test_transport_.SetExpectedRtcpPacket(p.Packet(), p.Length());
+
+ RtcpReportBlock report_block;
+ // Initialize remote_ssrc to a "clearly illegal" value.
+ report_block.remote_ssrc = 0xDEAD;
+ report_block.media_ssrc = kMediaSsrc; // SSRC of the RTP packet sender.
+ report_block.fraction_lost = kLoss >> 24;
+ report_block.cumulative_lost = kLoss; // 24 bits valid.
+ report_block.extended_high_sequence_number = kExtendedMax;
+ report_block.jitter = kTestJitter;
+ report_block.last_sr = kLastSr;
+ report_block.delay_since_last_sr = kDelayLastSr;
+
+ RtcpReceiverReferenceTimeReport rrtr;
+ rrtr.ntp_seconds = kNtpHigh;
+ rrtr.ntp_fraction = kNtpLow;
+
+ RtcpCastMessage cast_message(kMediaSsrc);
+ cast_message.ack_frame_id_ = kAckFrameId;
+ PacketIdSet missing_packets;
+ cast_message.missing_frames_and_packets_[kLostFrameId] = missing_packets;
+
+ missing_packets.insert(kLostPacketId1);
+ missing_packets.insert(kLostPacketId2);
+ missing_packets.insert(kLostPacketId3);
+ cast_message.missing_frames_and_packets_[kFrameIdWithLostPackets] =
+ missing_packets;
+
+ rtcp_sender_->SendRtcpFromRtpReceiver(
+ RtcpSender::kRtcpRr | RtcpSender::kRtcpRrtr | RtcpSender::kRtcpCast,
+ &report_block,
+ &rrtr,
+ &cast_message,
+ NULL);
+
+ EXPECT_EQ(1, test_transport_.packet_count());
+}
+
+TEST_F(RtcpSenderTest, RtcpReceiverReportWithRrtrCastMessageAndLog) {
+ static const uint32 kTimeBaseMs = 12345678;
+ static const uint32 kTimeDelayMs = 10;
+ static const uint32 kDelayDeltaMs = 123;
+
+ TestRtcpPacketBuilder p;
+ p.AddRr(kSendingSsrc, 1);
+ p.AddRb(kMediaSsrc);
+ p.AddSdesCname(kSendingSsrc, kCName);
+ p.AddXrHeader(kSendingSsrc);
+ p.AddXrRrtrBlock();
+ p.AddCast(kSendingSsrc, kMediaSsrc);
+ test_transport_.SetExpectedRtcpPacket(p.Packet(), p.Length());
+
+ RtcpReportBlock report_block;
+ // Initialize remote_ssrc to a "clearly illegal" value.
+ report_block.remote_ssrc = 0xDEAD;
+ report_block.media_ssrc = kMediaSsrc; // SSRC of the RTP packet sender.
+ report_block.fraction_lost = kLoss >> 24;
+ report_block.cumulative_lost = kLoss; // 24 bits valid.
+ report_block.extended_high_sequence_number = kExtendedMax;
+ report_block.jitter = kTestJitter;
+ report_block.last_sr = kLastSr;
+ report_block.delay_since_last_sr = kDelayLastSr;
+
+ RtcpReceiverReferenceTimeReport rrtr;
+ rrtr.ntp_seconds = kNtpHigh;
+ rrtr.ntp_fraction = kNtpLow;
+
+ RtcpCastMessage cast_message(kMediaSsrc);
+ cast_message.ack_frame_id_ = kAckFrameId;
+ PacketIdSet missing_packets;
+ cast_message.missing_frames_and_packets_[kLostFrameId] = missing_packets;
+
+ missing_packets.insert(kLostPacketId1);
+ missing_packets.insert(kLostPacketId2);
+ missing_packets.insert(kLostPacketId3);
+ cast_message.missing_frames_and_packets_[kFrameIdWithLostPackets] =
+ missing_packets;
+
+ // Test empty Log message.
+ RtcpReceiverLogMessage receiver_log;
+
+ VLOG(0) << " Test empty Log " ;
+ rtcp_sender_->SendRtcpFromRtpReceiver(
+ RtcpSender::kRtcpRr | RtcpSender::kRtcpRrtr | RtcpSender::kRtcpCast |
+ RtcpSender::kRtcpReceiverLog,
+ &report_block,
+ &rrtr,
+ &cast_message,
+ &receiver_log);
+
+
+ base::SimpleTestTickClock testing_clock;
+ testing_clock.Advance(base::TimeDelta::FromMilliseconds(kTimeBaseMs));
+
+ p.AddReceiverLog(kSendingSsrc);
+ p.AddReceiverFrameLog(kRtpTimestamp, 2, kTimeBaseMs);
+ p.AddReceiverEventLog(kDelayDeltaMs, 1, 0);
+ p.AddReceiverEventLog(kLostPacketId1, 6, kTimeDelayMs);
+
+ test_transport_.SetExpectedRtcpPacket(p.Packet(), p.Length());
+
+ RtcpReceiverFrameLogMessage frame_log(kRtpTimestamp);
+ RtcpReceiverEventLogMessage event_log;
+
+ event_log.type = kAckSent;
+ event_log.event_timestamp = testing_clock.NowTicks();
+ event_log.delay_delta = base::TimeDelta::FromMilliseconds(kDelayDeltaMs);
+ frame_log.event_log_messages_.push_back(event_log);
+
+ testing_clock.Advance(base::TimeDelta::FromMilliseconds(kTimeDelayMs));
+ event_log.type = kPacketReceived;
+ event_log.event_timestamp = testing_clock.NowTicks();
+ event_log.packet_id = kLostPacketId1;
+ frame_log.event_log_messages_.push_back(event_log);
+
+ receiver_log.push_back(frame_log);
+
+ VLOG(0) << " Test Log " ;
+ rtcp_sender_->SendRtcpFromRtpReceiver(
+ RtcpSender::kRtcpRr | RtcpSender::kRtcpRrtr | RtcpSender::kRtcpCast |
+ RtcpSender::kRtcpReceiverLog,
+ &report_block,
+ &rrtr,
+ &cast_message,
+ &receiver_log);
+
+ EXPECT_TRUE(receiver_log.empty());
+ EXPECT_EQ(2, test_transport_.packet_count());
+}
+
+TEST_F(RtcpSenderTest, RtcpReceiverReportWithOversizedFrameLog) {
+ static const uint32 kTimeBaseMs = 12345678;
+ static const uint32 kTimeDelayMs = 10;
+ static const uint32 kDelayDeltaMs = 123;
+
TestRtcpPacketBuilder p;
p.AddRr(kSendingSsrc, 1);
p.AddRb(kMediaSsrc);
p.AddSdesCname(kSendingSsrc, kCName);
- p.AddPli(kSendingSsrc, kMediaSsrc);
+
+ RtcpReportBlock report_block;
+ // Initialize remote_ssrc to a "clearly illegal" value.
+ report_block.remote_ssrc = 0xDEAD;
+ report_block.media_ssrc = kMediaSsrc; // SSRC of the RTP packet sender.
+ report_block.fraction_lost = kLoss >> 24;
+ report_block.cumulative_lost = kLoss; // 24 bits valid.
+ report_block.extended_high_sequence_number = kExtendedMax;
+ report_block.jitter = kTestJitter;
+ report_block.last_sr = kLastSr;
+ report_block.delay_since_last_sr = kDelayLastSr;
+
+ base::SimpleTestTickClock testing_clock;
+ testing_clock.Advance(base::TimeDelta::FromMilliseconds(kTimeBaseMs));
+
+ p.AddReceiverLog(kSendingSsrc);
+
+ p.AddReceiverFrameLog(kRtpTimestamp, 1, kTimeBaseMs);
+ p.AddReceiverEventLog(kDelayDeltaMs, 1, 0);
+ p.AddReceiverFrameLog(kRtpTimestamp + 2345,
+ kRtcpMaxReceiverLogMessages, kTimeBaseMs);
+
+ for (size_t i = 0; i < kRtcpMaxReceiverLogMessages; ++i) {
+ p.AddReceiverEventLog(
+ kLostPacketId1, 6, static_cast<uint16>(kTimeDelayMs * i));
+ }
+
test_transport_.SetExpectedRtcpPacket(p.Packet(), p.Length());
+ RtcpReceiverFrameLogMessage frame_1_log(kRtpTimestamp);
+ RtcpReceiverEventLogMessage event_log;
+
+ event_log.type = kAckSent;
+ event_log.event_timestamp = testing_clock.NowTicks();
+ event_log.delay_delta = base::TimeDelta::FromMilliseconds(kDelayDeltaMs);
+ frame_1_log.event_log_messages_.push_back(event_log);
+
+ RtcpReceiverLogMessage receiver_log;
+ receiver_log.push_back(frame_1_log);
+
+ RtcpReceiverFrameLogMessage frame_2_log(kRtpTimestamp + 2345);
+
+ for (int j = 0; j < 300; ++j) {
+ event_log.type = kPacketReceived;
+ event_log.event_timestamp = testing_clock.NowTicks();
+ event_log.packet_id = kLostPacketId1;
+ frame_2_log.event_log_messages_.push_back(event_log);
+ testing_clock.Advance(base::TimeDelta::FromMilliseconds(kTimeDelayMs));
+ }
+ receiver_log.push_back(frame_2_log);
+
+ rtcp_sender_->SendRtcpFromRtpReceiver(
+ RtcpSender::kRtcpRr | RtcpSender::kRtcpReceiverLog,
+ &report_block,
+ NULL,
+ NULL,
+ &receiver_log);
+
+ EXPECT_EQ(1, test_transport_.packet_count());
+ EXPECT_EQ(1u, receiver_log.size());
+ EXPECT_EQ(300u - kRtcpMaxReceiverLogMessages,
+ receiver_log.front().event_log_messages_.size());
+}
+
+TEST_F(RtcpSenderTest, RtcpReceiverReportWithTooManyLogFrames) {
+ static const uint32 kTimeBaseMs = 12345678;
+ static const uint32 kTimeDelayMs = 10;
+ static const uint32 kDelayDeltaMs = 123;
+
+ TestRtcpPacketBuilder p;
+ p.AddRr(kSendingSsrc, 1);
+ p.AddRb(kMediaSsrc);
+ p.AddSdesCname(kSendingSsrc, kCName);
+
RtcpReportBlock report_block;
// Initialize remote_ssrc to a "clearly illegal" value.
report_block.remote_ssrc = 0xDEAD;
report_block.media_ssrc = kMediaSsrc; // SSRC of the RTP packet sender.
report_block.fraction_lost = kLoss >> 24;
report_block.cumulative_lost = kLoss; // 24 bits valid.
- report_block.extended_high_sequence_number =
- kExtendedMax;
- report_block.jitter = kJitter;
+ report_block.extended_high_sequence_number = kExtendedMax;
+ report_block.jitter = kTestJitter;
report_block.last_sr = kLastSr;
report_block.delay_since_last_sr = kDelayLastSr;
- rtcp_sender_->SendRtcp(RtcpSender::kRtcpRr | RtcpSender::kRtcpPli,
- NULL,
- &report_block,
- kMediaSsrc,
- NULL,
- NULL,
- NULL);
+ base::SimpleTestTickClock testing_clock;
+ testing_clock.Advance(base::TimeDelta::FromMilliseconds(kTimeBaseMs));
+
+ p.AddReceiverLog(kSendingSsrc);
+
+ for (int i = 0; i < 119; ++i) {
+ p.AddReceiverFrameLog(kRtpTimestamp, 1, kTimeBaseMs + i * kTimeDelayMs);
+ p.AddReceiverEventLog(kDelayDeltaMs, 1, 0);
+ }
+ test_transport_.SetExpectedRtcpPacket(p.Packet(), p.Length());
+
+ RtcpReceiverLogMessage receiver_log;
+
+ for (int j = 0; j < 200; ++j) {
+ RtcpReceiverFrameLogMessage frame_log(kRtpTimestamp);
+ RtcpReceiverEventLogMessage event_log;
+
+ event_log.type = kAckSent;
+ event_log.event_timestamp = testing_clock.NowTicks();
+ event_log.delay_delta = base::TimeDelta::FromMilliseconds(kDelayDeltaMs);
+ frame_log.event_log_messages_.push_back(event_log);
+ receiver_log.push_back(frame_log);
+ testing_clock.Advance(base::TimeDelta::FromMilliseconds(kTimeDelayMs));
+ }
+ rtcp_sender_->SendRtcpFromRtpReceiver(
+ RtcpSender::kRtcpRr | RtcpSender::kRtcpReceiverLog,
+ &report_block,
+ NULL,
+ NULL,
+ &receiver_log);
EXPECT_EQ(1, test_transport_.packet_count());
+ EXPECT_EQ(81u, receiver_log.size());
}
} // namespace cast
diff --git a/chromium/media/cast/rtcp/rtcp_unittest.cc b/chromium/media/cast/rtcp/rtcp_unittest.cc
index dfcc6ea910c..535f3c34f83 100644
--- a/chromium/media/cast/rtcp/rtcp_unittest.cc
+++ b/chromium/media/cast/rtcp/rtcp_unittest.cc
@@ -4,11 +4,13 @@
#include "base/test/simple_test_tick_clock.h"
#include "media/cast/cast_defines.h"
-#include "media/cast/pacing/paced_sender.h"
+#include "media/cast/cast_environment.h"
+#include "media/cast/net/pacing/paced_sender.h"
#include "media/cast/rtcp/mock_rtcp_receiver_feedback.h"
#include "media/cast/rtcp/mock_rtcp_sender_feedback.h"
#include "media/cast/rtcp/rtcp.h"
#include "media/cast/rtcp/test_rtcp_packet_builder.h"
+#include "media/cast/test/fake_task_runner.h"
#include "testing/gmock/include/gmock/gmock.h"
namespace media {
@@ -18,22 +20,26 @@ using testing::_;
static const uint32 kSenderSsrc = 0x10203;
static const uint32 kReceiverSsrc = 0x40506;
-static const uint32 kUnknownSsrc = 0xDEAD;
static const std::string kCName("test@10.1.1.1");
static const uint32 kRtcpIntervalMs = 500;
-static const int64 kStartMillisecond = 123456789;
+static const int64 kStartMillisecond = GG_INT64_C(12345678900000);
static const int64 kAddedDelay = 123;
static const int64 kAddedShortDelay= 100;
class LocalRtcpTransport : public PacedPacketSender {
public:
- explicit LocalRtcpTransport(base::SimpleTestTickClock* testing_clock)
- : short_delay_(false),
+ explicit LocalRtcpTransport(scoped_refptr<CastEnvironment> cast_environment,
+ base::SimpleTestTickClock* testing_clock)
+ : drop_packets_(false),
+ short_delay_(false),
testing_clock_(testing_clock) {}
- void SetRtcpReceiver(Rtcp* rtcp) { rtcp_ = rtcp; }
+ void SetRtcpReceiver(Rtcp* rtcp) { rtcp_ = rtcp; }
+
+ void SetShortDelay() { short_delay_ = true; }
+
+ void SetDropPackets(bool drop_packets) { drop_packets_ = drop_packets; }
- void SetShortDelay() { short_delay_ = true; }
virtual bool SendRtcpPacket(const std::vector<uint8>& packet) OVERRIDE {
if (short_delay_) {
@@ -42,45 +48,49 @@ class LocalRtcpTransport : public PacedPacketSender {
} else {
testing_clock_->Advance(base::TimeDelta::FromMilliseconds(kAddedDelay));
}
+ if (drop_packets_) return true;
+
rtcp_->IncomingRtcpPacket(&(packet[0]), packet.size());
return true;
}
- virtual bool SendPacket(const std::vector<uint8>& packet,
- int num_of_packets) OVERRIDE {
+ virtual bool SendPackets(const PacketList& packets) OVERRIDE {
return false;
}
- virtual bool ResendPacket(const std::vector<uint8>& packet,
- int num_of_packets) OVERRIDE {
+ virtual bool ResendPackets(const PacketList& packets) OVERRIDE {
return false;
}
private:
+ bool drop_packets_;
bool short_delay_;
Rtcp* rtcp_;
base::SimpleTestTickClock* testing_clock_;
+ scoped_refptr<CastEnvironment> cast_environment_;
};
class RtcpPeer : public Rtcp {
public:
- RtcpPeer(RtcpSenderFeedback* sender_feedback,
+ RtcpPeer(scoped_refptr<CastEnvironment> cast_environment,
+ RtcpSenderFeedback* sender_feedback,
PacedPacketSender* const paced_packet_sender,
RtpSenderStatistics* rtp_sender_statistics,
RtpReceiverStatistics* rtp_receiver_statistics,
RtcpMode rtcp_mode,
const base::TimeDelta& rtcp_interval,
- bool sending_media,
uint32 local_ssrc,
+ uint32 remote_ssrc,
const std::string& c_name)
- : Rtcp(sender_feedback,
+ : Rtcp(cast_environment,
+ sender_feedback,
paced_packet_sender,
rtp_sender_statistics,
rtp_receiver_statistics,
rtcp_mode,
rtcp_interval,
- sending_media,
local_ssrc,
+ remote_ssrc,
c_name) {
}
@@ -91,40 +101,41 @@ class RtcpPeer : public Rtcp {
class RtcpTest : public ::testing::Test {
protected:
RtcpTest()
- : transport_(&testing_clock_) {
+ : task_runner_(new test::FakeTaskRunner(&testing_clock_)),
+ cast_environment_(new CastEnvironment(&testing_clock_, task_runner_,
+ task_runner_, task_runner_, task_runner_, task_runner_,
+ GetDefaultCastLoggingConfig())),
+ transport_(cast_environment_, &testing_clock_) {
testing_clock_.Advance(
base::TimeDelta::FromMilliseconds(kStartMillisecond));
}
- ~RtcpTest() {}
+ virtual ~RtcpTest() {}
- void SetUp() {
- EXPECT_CALL(mock_sender_feedback_, OnReceivedReportBlock(_)).Times(0);
- EXPECT_CALL(mock_sender_feedback_, OnReceivedIntraFrameRequest()).Times(0);
- EXPECT_CALL(mock_sender_feedback_, OnReceivedRpsi(_, _)).Times(0);
- EXPECT_CALL(mock_sender_feedback_, OnReceivedRemb(_)).Times(0);
- EXPECT_CALL(mock_sender_feedback_, OnReceivedNackRequest(_)).Times(0);
+ virtual void SetUp() {
EXPECT_CALL(mock_sender_feedback_, OnReceivedCastFeedback(_)).Times(0);
}
base::SimpleTestTickClock testing_clock_;
+ scoped_refptr<test::FakeTaskRunner> task_runner_;
+ scoped_refptr<CastEnvironment> cast_environment_;
LocalRtcpTransport transport_;
MockRtcpSenderFeedback mock_sender_feedback_;
};
TEST_F(RtcpTest, TimeToSend) {
- base::TimeTicks start_time =
- base::TimeTicks::FromInternalValue(kStartMillisecond * 1000);
- Rtcp rtcp(&mock_sender_feedback_,
+ base::TimeTicks start_time;
+ start_time += base::TimeDelta::FromMilliseconds(kStartMillisecond);
+ Rtcp rtcp(cast_environment_,
+ &mock_sender_feedback_,
&transport_,
NULL,
NULL,
kRtcpCompound,
base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
- true, // Media sender.
kSenderSsrc,
+ kReceiverSsrc,
kCName);
- rtcp.set_clock(&testing_clock_);
transport_.SetRtcpReceiver(&rtcp);
EXPECT_LE(start_time, rtcp.TimeToSendNextRtcpReport());
EXPECT_GE(start_time + base::TimeDelta::FromMilliseconds(
@@ -136,119 +147,147 @@ TEST_F(RtcpTest, TimeToSend) {
}
TEST_F(RtcpTest, BasicSenderReport) {
- Rtcp rtcp(&mock_sender_feedback_,
+ Rtcp rtcp(cast_environment_,
+ &mock_sender_feedback_,
&transport_,
NULL,
NULL,
kRtcpCompound,
base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
- true, // Media sender.
kSenderSsrc,
+ kReceiverSsrc,
kCName);
transport_.SetRtcpReceiver(&rtcp);
- rtcp.SendRtcpReport(kUnknownSsrc);
+ rtcp.SendRtcpFromRtpSender(NULL);
}
TEST_F(RtcpTest, BasicReceiverReport) {
- EXPECT_CALL(mock_sender_feedback_, OnReceivedReportBlock(_)).Times(1);
- Rtcp rtcp(&mock_sender_feedback_,
+ Rtcp rtcp(cast_environment_,
+ &mock_sender_feedback_,
&transport_,
NULL,
NULL,
kRtcpCompound,
base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
- false, // Media receiver.
- kSenderSsrc,
- kCName);
- transport_.SetRtcpReceiver(&rtcp);
- rtcp.SetRemoteSSRC(kSenderSsrc);
- rtcp.SendRtcpReport(kSenderSsrc);
-}
-
-TEST_F(RtcpTest, BasicPli) {
- EXPECT_CALL(mock_sender_feedback_, OnReceivedReportBlock(_)).Times(1);
- EXPECT_CALL(mock_sender_feedback_, OnReceivedIntraFrameRequest()).Times(1);
-
- // Media receiver.
- Rtcp rtcp(&mock_sender_feedback_,
- &transport_,
- NULL,
- NULL,
- kRtcpReducedSize,
- base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
- false,
kSenderSsrc,
+ kReceiverSsrc,
kCName);
- rtcp.set_clock(&testing_clock_);
transport_.SetRtcpReceiver(&rtcp);
- rtcp.SetRemoteSSRC(kSenderSsrc);
- rtcp.SendRtcpPli(kSenderSsrc);
+ rtcp.SendRtcpFromRtpReceiver(NULL, NULL);
}
TEST_F(RtcpTest, BasicCast) {
EXPECT_CALL(mock_sender_feedback_, OnReceivedCastFeedback(_)).Times(1);
// Media receiver.
- Rtcp rtcp(&mock_sender_feedback_,
+ Rtcp rtcp(cast_environment_,
+ &mock_sender_feedback_,
&transport_,
NULL,
NULL,
kRtcpReducedSize,
base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
- false,
+ kSenderSsrc,
kSenderSsrc,
kCName);
- rtcp.set_clock(&testing_clock_);
transport_.SetRtcpReceiver(&rtcp);
- rtcp.SetRemoteSSRC(kSenderSsrc);
RtcpCastMessage cast_message(kSenderSsrc);
cast_message.ack_frame_id_ = kAckFrameId;
- std::set<uint16_t> missing_packets;
+ PacketIdSet missing_packets;
cast_message.missing_frames_and_packets_[
kLostFrameId] = missing_packets;
missing_packets.insert(kLostPacketId1);
missing_packets.insert(kLostPacketId2);
missing_packets.insert(kLostPacketId3);
- cast_message.missing_frames_and_packets_[
- kFrameIdWithLostPackets] = missing_packets;
- rtcp.SendRtcpCast(cast_message);
+ cast_message.missing_frames_and_packets_[kFrameIdWithLostPackets] =
+ missing_packets;
+ rtcp.SendRtcpFromRtpReceiver(&cast_message, NULL);
}
-TEST_F(RtcpTest, Rtt) {
+TEST_F(RtcpTest, RttReducedSizeRtcp) {
// Media receiver.
- LocalRtcpTransport receiver_transport(&testing_clock_);
- Rtcp rtcp_receiver(&mock_sender_feedback_,
+ LocalRtcpTransport receiver_transport(cast_environment_, &testing_clock_);
+ Rtcp rtcp_receiver(cast_environment_,
+ &mock_sender_feedback_,
&receiver_transport,
NULL,
NULL,
kRtcpReducedSize,
base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
- false,
kReceiverSsrc,
+ kSenderSsrc,
kCName);
- rtcp_receiver.set_clock(&testing_clock_);
// Media sender.
- LocalRtcpTransport sender_transport(&testing_clock_);
- Rtcp rtcp_sender(&mock_sender_feedback_,
+ LocalRtcpTransport sender_transport(cast_environment_, &testing_clock_);
+ Rtcp rtcp_sender(cast_environment_,
+ &mock_sender_feedback_,
&sender_transport,
NULL,
NULL,
kRtcpReducedSize,
base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
- true,
kSenderSsrc,
+ kReceiverSsrc,
kCName);
- rtcp_sender.set_clock(&testing_clock_);
receiver_transport.SetRtcpReceiver(&rtcp_sender);
sender_transport.SetRtcpReceiver(&rtcp_receiver);
- rtcp_sender.SetRemoteSSRC(kReceiverSsrc);
- rtcp_receiver.SetRemoteSSRC(kSenderSsrc);
+ base::TimeDelta rtt;
+ base::TimeDelta avg_rtt;
+ base::TimeDelta min_rtt;
+ base::TimeDelta max_rtt;
+ EXPECT_FALSE(rtcp_sender.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
+ EXPECT_FALSE(rtcp_receiver.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
- EXPECT_CALL(mock_sender_feedback_, OnReceivedReportBlock(_)).Times(2);
+ rtcp_sender.SendRtcpFromRtpSender(NULL);
+ rtcp_receiver.SendRtcpFromRtpReceiver(NULL, NULL);
+ EXPECT_TRUE(rtcp_sender.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
+ EXPECT_FALSE(rtcp_receiver.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
+ EXPECT_NEAR(2 * kAddedDelay, rtt.InMilliseconds(), 1);
+ EXPECT_NEAR(2 * kAddedDelay, avg_rtt.InMilliseconds(), 1);
+ EXPECT_NEAR(2 * kAddedDelay, min_rtt.InMilliseconds(), 1);
+ EXPECT_NEAR(2 * kAddedDelay, max_rtt.InMilliseconds(), 1);
+ rtcp_sender.SendRtcpFromRtpSender(NULL);
+ EXPECT_TRUE(rtcp_receiver.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
+
+ EXPECT_NEAR(2 * kAddedDelay, rtt.InMilliseconds(), 1);
+ EXPECT_NEAR(2 * kAddedDelay, avg_rtt.InMilliseconds(), 1);
+ EXPECT_NEAR(2 * kAddedDelay, min_rtt.InMilliseconds(), 1);
+ EXPECT_NEAR(2 * kAddedDelay, max_rtt.InMilliseconds(), 1);
+}
+
+TEST_F(RtcpTest, Rtt) {
+ // Media receiver.
+ LocalRtcpTransport receiver_transport(cast_environment_, &testing_clock_);
+ Rtcp rtcp_receiver(cast_environment_,
+ &mock_sender_feedback_,
+ &receiver_transport,
+ NULL,
+ NULL,
+ kRtcpCompound,
+ base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
+ kReceiverSsrc,
+ kSenderSsrc,
+ kCName);
+
+ // Media sender.
+ LocalRtcpTransport sender_transport(cast_environment_, &testing_clock_);
+ Rtcp rtcp_sender(cast_environment_,
+ &mock_sender_feedback_,
+ &sender_transport,
+ NULL,
+ NULL,
+ kRtcpCompound,
+ base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
+ kSenderSsrc,
+ kReceiverSsrc,
+ kCName);
+
+ receiver_transport.SetRtcpReceiver(&rtcp_sender);
+ sender_transport.SetRtcpReceiver(&rtcp_receiver);
base::TimeDelta rtt;
base::TimeDelta avg_rtt;
@@ -257,17 +296,17 @@ TEST_F(RtcpTest, Rtt) {
EXPECT_FALSE(rtcp_sender.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
EXPECT_FALSE(rtcp_receiver.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
- rtcp_sender.SendRtcpReport(kSenderSsrc);
- rtcp_receiver.SendRtcpReport(kSenderSsrc);
+ rtcp_sender.SendRtcpFromRtpSender(NULL);
+ rtcp_receiver.SendRtcpFromRtpReceiver(NULL, NULL);
EXPECT_TRUE(rtcp_sender.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
EXPECT_FALSE(rtcp_receiver.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
EXPECT_NEAR(2 * kAddedDelay, rtt.InMilliseconds(), 1);
EXPECT_NEAR(2 * kAddedDelay, avg_rtt.InMilliseconds(), 1);
EXPECT_NEAR(2 * kAddedDelay, min_rtt.InMilliseconds(), 1);
EXPECT_NEAR(2 * kAddedDelay, max_rtt.InMilliseconds(), 1);
- rtcp_sender.SendRtcpReport(kSenderSsrc);
- EXPECT_TRUE(rtcp_receiver.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
+ rtcp_sender.SendRtcpFromRtpSender(NULL);
+ EXPECT_TRUE(rtcp_receiver.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
EXPECT_NEAR(2 * kAddedDelay, rtt.InMilliseconds(), 1);
EXPECT_NEAR(2 * kAddedDelay, avg_rtt.InMilliseconds(), 1);
EXPECT_NEAR(2 * kAddedDelay, min_rtt.InMilliseconds(), 1);
@@ -275,58 +314,149 @@ TEST_F(RtcpTest, Rtt) {
receiver_transport.SetShortDelay();
sender_transport.SetShortDelay();
- rtcp_receiver.SendRtcpReport(kSenderSsrc);
+ rtcp_receiver.SendRtcpFromRtpReceiver(NULL, NULL);
EXPECT_TRUE(rtcp_sender.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
-
EXPECT_NEAR(kAddedDelay + kAddedShortDelay, rtt.InMilliseconds(), 1);
EXPECT_NEAR((kAddedShortDelay + 3 * kAddedDelay) / 2,
avg_rtt.InMilliseconds(),
1);
EXPECT_NEAR(kAddedDelay + kAddedShortDelay, min_rtt.InMilliseconds(), 1);
EXPECT_NEAR(2 * kAddedDelay, max_rtt.InMilliseconds(), 1);
- rtcp_sender.SendRtcpReport(kSenderSsrc);
- EXPECT_TRUE(rtcp_receiver.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
+ rtcp_sender.SendRtcpFromRtpSender(NULL);
+ EXPECT_TRUE(rtcp_receiver.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
EXPECT_NEAR(2 * kAddedShortDelay, rtt.InMilliseconds(), 1);
EXPECT_NEAR((2 * kAddedShortDelay + 2 * kAddedDelay) / 2,
avg_rtt.InMilliseconds(),
1);
- EXPECT_NEAR(2 * kAddedShortDelay, min_rtt.InMilliseconds(), 1);
+ EXPECT_NEAR(2 * kAddedShortDelay, min_rtt.InMilliseconds(), 1);
+ EXPECT_NEAR(2 * kAddedDelay, max_rtt.InMilliseconds(), 1);
+
+ rtcp_receiver.SendRtcpFromRtpReceiver(NULL, NULL);
+ EXPECT_TRUE(rtcp_sender.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
+ EXPECT_NEAR(2 * kAddedShortDelay, rtt.InMilliseconds(), 1);
+ EXPECT_NEAR(2 * kAddedShortDelay, min_rtt.InMilliseconds(), 1);
+ EXPECT_NEAR(2 * kAddedDelay, max_rtt.InMilliseconds(), 1);
+
+ rtcp_receiver.SendRtcpFromRtpReceiver(NULL, NULL);
+ EXPECT_TRUE(rtcp_sender.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
+ EXPECT_NEAR(2 * kAddedShortDelay, rtt.InMilliseconds(), 1);
+ EXPECT_NEAR(2 * kAddedShortDelay, min_rtt.InMilliseconds(), 1);
EXPECT_NEAR(2 * kAddedDelay, max_rtt.InMilliseconds(), 1);
}
-TEST_F(RtcpTest, NtpAndTime) {
- RtcpPeer rtcp_peer(&mock_sender_feedback_,
- NULL,
+TEST_F(RtcpTest, RttWithPacketLoss) {
+ // Media receiver.
+ LocalRtcpTransport receiver_transport(cast_environment_, &testing_clock_);
+ Rtcp rtcp_receiver(cast_environment_,
+ &mock_sender_feedback_,
+ &receiver_transport,
NULL,
NULL,
kRtcpReducedSize,
base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
- false,
+ kSenderSsrc,
kReceiverSsrc,
kCName);
- rtcp_peer.set_clock(&testing_clock_);
- uint32 ntp_seconds = 0;
- uint32 ntp_fractions = 0;
- base::TimeTicks input_time = base::TimeTicks::FromInternalValue(
- 12345678901000LL + kNtpEpochDeltaMicroseconds);
- ConvertTimeToNtp(input_time, &ntp_seconds, &ntp_fractions);
- EXPECT_EQ(12345678u, ntp_seconds);
- EXPECT_EQ(input_time,
- ConvertNtpToTime(ntp_seconds, ntp_fractions));
+
+ // Media sender.
+ LocalRtcpTransport sender_transport(cast_environment_, &testing_clock_);
+ Rtcp rtcp_sender(cast_environment_,
+ &mock_sender_feedback_,
+ &sender_transport,
+ NULL,
+ NULL,
+ kRtcpReducedSize,
+ base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
+ kReceiverSsrc,
+ kSenderSsrc,
+ kCName);
+
+ receiver_transport.SetRtcpReceiver(&rtcp_sender);
+ sender_transport.SetRtcpReceiver(&rtcp_receiver);
+
+ rtcp_receiver.SendRtcpFromRtpReceiver(NULL, NULL);
+ rtcp_sender.SendRtcpFromRtpSender(NULL);
+
+ base::TimeDelta rtt;
+ base::TimeDelta avg_rtt;
+ base::TimeDelta min_rtt;
+ base::TimeDelta max_rtt;
+ EXPECT_FALSE(rtcp_sender.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
+ EXPECT_TRUE(rtcp_receiver.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
+ EXPECT_NEAR(2 * kAddedDelay, rtt.InMilliseconds(), 1);
+ EXPECT_NEAR(2 * kAddedDelay, avg_rtt.InMilliseconds(), 1);
+ EXPECT_NEAR(2 * kAddedDelay, min_rtt.InMilliseconds(), 1);
+ EXPECT_NEAR(2 * kAddedDelay, max_rtt.InMilliseconds(), 1);
+
+ receiver_transport.SetShortDelay();
+ sender_transport.SetShortDelay();
+ receiver_transport.SetDropPackets(true);
+
+ rtcp_receiver.SendRtcpFromRtpReceiver(NULL, NULL);
+ rtcp_sender.SendRtcpFromRtpSender(NULL);
+
+ EXPECT_TRUE(rtcp_receiver.Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt));
+ EXPECT_NEAR(kAddedDelay + kAddedShortDelay, rtt.InMilliseconds(), 1);
+}
+
+TEST_F(RtcpTest, NtpAndTime) {
+ const int64 kSecondsbetweenYear1900and2010 = GG_INT64_C(40176 * 24 * 60 * 60);
+ const int64 kSecondsbetweenYear1900and2030 = GG_INT64_C(47481 * 24 * 60 * 60);
+
+ uint32 ntp_seconds_1 = 0;
+ uint32 ntp_fractions_1 = 0;
+ base::TimeTicks input_time = base::TimeTicks::Now();
+ ConvertTimeTicksToNtp(input_time, &ntp_seconds_1, &ntp_fractions_1);
+
+ // Verify absolute value.
+ EXPECT_GT(ntp_seconds_1, kSecondsbetweenYear1900and2010);
+ EXPECT_LT(ntp_seconds_1, kSecondsbetweenYear1900and2030);
+
+ base::TimeTicks out_1 = ConvertNtpToTimeTicks(ntp_seconds_1, ntp_fractions_1);
+ EXPECT_EQ(input_time, out_1); // Verify inverse.
+
+ base::TimeDelta time_delta = base::TimeDelta::FromMilliseconds(1000);
+ input_time += time_delta;
+
+ uint32 ntp_seconds_2 = 0;
+ uint32 ntp_fractions_2 = 0;
+
+ ConvertTimeTicksToNtp(input_time, &ntp_seconds_2, &ntp_fractions_2);
+ base::TimeTicks out_2 = ConvertNtpToTimeTicks(ntp_seconds_2, ntp_fractions_2);
+ EXPECT_EQ(input_time, out_2); // Verify inverse.
+
+ // Verify delta.
+ EXPECT_EQ((out_2 - out_1), time_delta);
+ EXPECT_EQ((ntp_seconds_2 - ntp_seconds_1), GG_UINT32_C(1));
+ EXPECT_NEAR(ntp_fractions_2, ntp_fractions_1, 1);
+
+ time_delta = base::TimeDelta::FromMilliseconds(500);
+ input_time += time_delta;
+
+ uint32 ntp_seconds_3 = 0;
+ uint32 ntp_fractions_3 = 0;
+
+ ConvertTimeTicksToNtp(input_time, &ntp_seconds_3, &ntp_fractions_3);
+ base::TimeTicks out_3 = ConvertNtpToTimeTicks(ntp_seconds_3, ntp_fractions_3);
+ EXPECT_EQ(input_time, out_3); // Verify inverse.
+
+ // Verify delta.
+ EXPECT_EQ((out_3 - out_2), time_delta);
+ EXPECT_NEAR((ntp_fractions_3 - ntp_fractions_2), 0xffffffff / 2, 1);
}
TEST_F(RtcpTest, WrapAround) {
- RtcpPeer rtcp_peer(&mock_sender_feedback_,
+ RtcpPeer rtcp_peer(cast_environment_,
+ &mock_sender_feedback_,
NULL,
NULL,
NULL,
kRtcpReducedSize,
base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
- false,
kReceiverSsrc,
+ kSenderSsrc,
kCName);
- rtcp_peer.set_clock(&testing_clock_);
uint32 new_timestamp = 0;
uint32 old_timestamp = 0;
EXPECT_EQ(0, rtcp_peer.CheckForWrapAround(new_timestamp, old_timestamp));
@@ -337,24 +467,24 @@ TEST_F(RtcpTest, WrapAround) {
old_timestamp = 1234567890;
EXPECT_EQ(0, rtcp_peer.CheckForWrapAround(new_timestamp, old_timestamp));
new_timestamp = 123;
- old_timestamp = 4234567890;
+ old_timestamp = 4234567890u;
EXPECT_EQ(1, rtcp_peer.CheckForWrapAround(new_timestamp, old_timestamp));
- new_timestamp = 4234567890;
+ new_timestamp = 4234567890u;
old_timestamp = 123;
EXPECT_EQ(-1, rtcp_peer.CheckForWrapAround(new_timestamp, old_timestamp));
}
TEST_F(RtcpTest, RtpTimestampInSenderTime) {
- RtcpPeer rtcp_peer(&mock_sender_feedback_,
+ RtcpPeer rtcp_peer(cast_environment_,
+ &mock_sender_feedback_,
NULL,
NULL,
NULL,
kRtcpReducedSize,
base::TimeDelta::FromMilliseconds(kRtcpIntervalMs),
- false,
kReceiverSsrc,
+ kSenderSsrc,
kCName);
- rtcp_peer.set_clock(&testing_clock_);
int frequency = 32000;
uint32 rtp_timestamp = 64000;
base::TimeTicks rtp_timestamp_in_ticks;
@@ -365,43 +495,44 @@ TEST_F(RtcpTest, RtpTimestampInSenderTime) {
uint32 ntp_seconds = 0;
uint32 ntp_fractions = 0;
- base::TimeTicks input_time = base::TimeTicks::FromInternalValue(
- 12345678901000LL + kNtpEpochDeltaMicroseconds);
+ uint64 input_time_us = 12345678901000LL;
+ base::TimeTicks input_time;
+ input_time += base::TimeDelta::FromMicroseconds(input_time_us);
// Test exact match.
- ConvertTimeToNtp(input_time, &ntp_seconds, &ntp_fractions);
+ ConvertTimeTicksToNtp(input_time, &ntp_seconds, &ntp_fractions);
rtcp_peer.OnReceivedLipSyncInfo(rtp_timestamp, ntp_seconds, ntp_fractions);
EXPECT_TRUE(rtcp_peer.RtpTimestampInSenderTime(frequency, rtp_timestamp,
- &rtp_timestamp_in_ticks));
+ &rtp_timestamp_in_ticks));
EXPECT_EQ(input_time, rtp_timestamp_in_ticks);
// Test older rtp_timestamp.
rtp_timestamp = 32000;
EXPECT_TRUE(rtcp_peer.RtpTimestampInSenderTime(frequency, rtp_timestamp,
- &rtp_timestamp_in_ticks));
+ &rtp_timestamp_in_ticks));
EXPECT_EQ(input_time - base::TimeDelta::FromMilliseconds(1000),
rtp_timestamp_in_ticks);
// Test older rtp_timestamp with wrap.
- rtp_timestamp = 4294903296;
+ rtp_timestamp = 4294903296u;
EXPECT_TRUE(rtcp_peer.RtpTimestampInSenderTime(frequency, rtp_timestamp,
- &rtp_timestamp_in_ticks));
+ &rtp_timestamp_in_ticks));
EXPECT_EQ(input_time - base::TimeDelta::FromMilliseconds(4000),
rtp_timestamp_in_ticks);
// Test newer rtp_timestamp.
rtp_timestamp = 128000;
EXPECT_TRUE(rtcp_peer.RtpTimestampInSenderTime(frequency, rtp_timestamp,
- &rtp_timestamp_in_ticks));
+ &rtp_timestamp_in_ticks));
EXPECT_EQ(input_time + base::TimeDelta::FromMilliseconds(2000),
rtp_timestamp_in_ticks);
// Test newer rtp_timestamp with wrap.
- rtp_timestamp = 4294903296;
+ rtp_timestamp = 4294903296u;
rtcp_peer.OnReceivedLipSyncInfo(rtp_timestamp, ntp_seconds, ntp_fractions);
rtp_timestamp = 64000;
EXPECT_TRUE(rtcp_peer.RtpTimestampInSenderTime(frequency, rtp_timestamp,
- &rtp_timestamp_in_ticks));
+ &rtp_timestamp_in_ticks));
EXPECT_EQ(input_time + base::TimeDelta::FromMilliseconds(4000),
rtp_timestamp_in_ticks);
}
diff --git a/chromium/media/cast/rtcp/rtcp_utility.cc b/chromium/media/cast/rtcp/rtcp_utility.cc
index 4f9d2ec7693..daeaa8aaceb 100644
--- a/chromium/media/cast/rtcp/rtcp_utility.cc
+++ b/chromium/media/cast/rtcp/rtcp_utility.cc
@@ -56,6 +56,15 @@ RtcpFieldTypes RtcpParser::Iterate() {
case kStateBye:
IterateByeItem();
break;
+ case kStateApplicationSpecificCastReceiverFrameLog:
+ IterateCastReceiverLogFrame();
+ break;
+ case kStateApplicationSpecificCastReceiverEventLog:
+ IterateCastReceiverLogEvent();
+ break;
+ case kStateApplicationSpecificCastSenderLog:
+ IterateCastSenderLog();
+ break;
case kStateExtendedReportBlock:
IterateExtendedReportItem();
break;
@@ -123,6 +132,12 @@ void RtcpParser::IterateTopLevel() {
break;
}
return;
+ case kPacketTypeApplicationDefined:
+ if (!ParseApplicationDefined(header.IC)) {
+ // Nothing supported found, continue to next block!
+ break;
+ }
+ return;
case kPacketTypeGenericRtpFeedback: // Fall through!
case kPacketTypePayloadSpecific:
if (!ParseFeedBackCommon(header)) {
@@ -203,6 +218,21 @@ void RtcpParser::IteratePayloadSpecificCastNackItem() {
if (!success) Iterate();
}
+void RtcpParser::IterateCastReceiverLogFrame() {
+ bool success = ParseCastReceiverLogFrameItem();
+ if (!success) Iterate();
+}
+
+void RtcpParser::IterateCastReceiverLogEvent() {
+ bool success = ParseCastReceiverLogEventItem();
+ if (!success) Iterate();
+}
+
+void RtcpParser::IterateCastSenderLog() {
+ bool success = ParseCastSenderLogItem();
+ if (!success) Iterate();
+}
+
void RtcpParser::Validate() {
if (rtcp_data_ == NULL) return; // NOT VALID
@@ -466,6 +496,124 @@ bool RtcpParser::ParseByeItem() {
return true;
}
+bool RtcpParser::ParseApplicationDefined(uint8 subtype) {
+ ptrdiff_t length = rtcp_block_end_ - rtcp_data_;
+ if (length < 16 ||
+ !(subtype == kSenderLogSubtype || subtype == kReceiverLogSubtype)) {
+ state_ = kStateTopLevel;
+ EndCurrentBlock();
+ return false;
+ }
+
+ uint32 sender_ssrc;
+ uint32 name;
+
+ net::BigEndianReader big_endian_reader(rtcp_data_, length);
+ big_endian_reader.Skip(4); // Skip header.
+ big_endian_reader.ReadU32(&sender_ssrc);
+ big_endian_reader.ReadU32(&name);
+
+ if (name != kCast) {
+ state_ = kStateTopLevel;
+ EndCurrentBlock();
+ return false;
+ }
+ rtcp_data_ += 12;
+ switch (subtype) {
+ case kSenderLogSubtype:
+ state_ = kStateApplicationSpecificCastSenderLog;
+ field_type_ = kRtcpApplicationSpecificCastSenderLogCode;
+ field_.cast_sender_log.sender_ssrc = sender_ssrc;
+ break;
+ case kReceiverLogSubtype:
+ state_ = kStateApplicationSpecificCastReceiverFrameLog;
+ field_type_ = kRtcpApplicationSpecificCastReceiverLogCode;
+ field_.cast_receiver_log.sender_ssrc = sender_ssrc;
+ break;
+ default:
+ NOTREACHED();
+ }
+ return true;
+}
+
+bool RtcpParser::ParseCastReceiverLogFrameItem() {
+ ptrdiff_t length = rtcp_block_end_ - rtcp_data_;
+ if (length < 12) {
+ state_ = kStateTopLevel;
+ EndCurrentBlock();
+ return false;
+ }
+ uint32 rtp_timestamp;
+ uint32 data;
+ net::BigEndianReader big_endian_reader(rtcp_data_, length);
+ big_endian_reader.ReadU32(&rtp_timestamp);
+ big_endian_reader.ReadU32(&data);
+
+ rtcp_data_ += 8;
+
+ field_.cast_receiver_log.rtp_timestamp = rtp_timestamp;
+ // We have 24 LSB of the event timestamp base on the wire.
+ field_.cast_receiver_log.event_timestamp_base = data & 0xffffff;
+
+ number_of_blocks_ = 1 + static_cast<uint8>(data >> 24);
+ state_ = kStateApplicationSpecificCastReceiverEventLog;
+ field_type_ = kRtcpApplicationSpecificCastReceiverLogFrameCode;
+ return true;
+}
+
+bool RtcpParser::ParseCastReceiverLogEventItem() {
+ ptrdiff_t length = rtcp_block_end_ - rtcp_data_;
+ if (length < 4) {
+ state_ = kStateTopLevel;
+ EndCurrentBlock();
+ return false;
+ }
+ if (number_of_blocks_ == 0) {
+ // Continue parsing the next receiver frame event.
+ state_ = kStateApplicationSpecificCastReceiverFrameLog;
+ return false;
+ }
+ number_of_blocks_--;
+
+ uint16 delay_delta_or_packet_id;
+ uint16 event_type_and_timestamp_delta;
+ net::BigEndianReader big_endian_reader(rtcp_data_, length);
+ big_endian_reader.ReadU16(&delay_delta_or_packet_id);
+ big_endian_reader.ReadU16(&event_type_and_timestamp_delta);
+
+ rtcp_data_ += 4;
+
+ field_.cast_receiver_log.event =
+ static_cast<uint8>(event_type_and_timestamp_delta >> 12);
+ field_.cast_receiver_log.delay_delta_or_packet_id = delay_delta_or_packet_id;
+ field_.cast_receiver_log.event_timestamp_delta =
+ event_type_and_timestamp_delta & 0xfff;
+
+ field_type_ = kRtcpApplicationSpecificCastReceiverLogEventCode;
+ return true;
+}
+
+bool RtcpParser::ParseCastSenderLogItem() {
+ ptrdiff_t length = rtcp_block_end_ - rtcp_data_;
+
+ if (length < 4) {
+ state_ = kStateTopLevel;
+ EndCurrentBlock();
+ return false;
+ }
+ uint32 data;
+ net::BigEndianReader big_endian_reader(rtcp_data_, length);
+ big_endian_reader.ReadU32(&data);
+
+ rtcp_data_ += 4;
+
+ field_.cast_sender_log.status = static_cast<uint8>(data >> 24);
+ // We have 24 LSB of the RTP timestamp on the wire.
+ field_.cast_sender_log.rtp_timestamp = data & 0xffffff;
+ field_type_ = kRtcpApplicationSpecificCastSenderLogCode;
+ return true;
+}
+
bool RtcpParser::ParseFeedBackCommon(const RtcpCommonHeader& header) {
DCHECK((header.PT == kPacketTypeGenericRtpFeedback) ||
(header.PT == kPacketTypePayloadSpecific)) << "Invalid state";
@@ -686,7 +834,6 @@ bool RtcpParser::ParsePayloadSpecificRembItem() {
bool RtcpParser::ParsePayloadSpecificCastItem() {
ptrdiff_t length = rtcp_block_end_ - rtcp_data_;
-
if (length < 4) {
state_ = kStateTopLevel;
EndCurrentBlock();
diff --git a/chromium/media/cast/rtcp/rtcp_utility.h b/chromium/media/cast/rtcp/rtcp_utility.h
index 2df13e7aed9..5cf55d91060 100644
--- a/chromium/media/cast/rtcp/rtcp_utility.h
+++ b/chromium/media/cast/rtcp/rtcp_utility.h
@@ -13,12 +13,21 @@ namespace media {
namespace cast {
static const int kRtcpRpsiDataSize = 30;
-static const int kRtcpCnameSize = 256; // RFC 3550 page 44, including end null.
+
+// RFC 3550 page 44, including end null.
+static const size_t kRtcpCnameSize = 256;
static const int kRtcpMaxNumberOfRembFeedbackSsrcs = 255;
static const uint32 kRemb = ('R' << 24) + ('E' << 16) + ('M' << 8) + 'B';
static const uint32 kCast = ('C' << 24) + ('A' << 16) + ('S' << 8) + 'T';
+static const uint8 kSenderLogSubtype = 1;
+static const uint8 kReceiverLogSubtype = 2;
+
+static const size_t kRtcpMaxReceiverLogMessages = 256;
+static const size_t kRtcpMaxNackFields = 253;
+static const size_t kRtcpMaxCastLossFields = 100;
+
struct RtcpFieldReceiverReport {
// RFC 3550.
uint32 sender_ssrc;
@@ -137,6 +146,21 @@ struct RtcpFieldPayloadSpecificCastNackItem {
uint8 bitmask;
};
+struct RtcpFieldApplicationSpecificCastReceiverLogItem {
+ uint32 sender_ssrc;
+ uint32 rtp_timestamp;
+ uint32 event_timestamp_base;
+ uint8 event;
+ uint16 delay_delta_or_packet_id;
+ uint16 event_timestamp_delta;
+};
+
+struct RtcpFieldApplicationSpecificCastSenderLogItem {
+ uint32 sender_ssrc;
+ uint8 status;
+ uint32 rtp_timestamp;
+};
+
union RtcpField {
RtcpFieldReceiverReport receiver_report;
RtcpFieldSenderReport sender_report;
@@ -159,6 +183,9 @@ union RtcpField {
RtcpFieldPayloadSpecificRembItem remb_item;
RtcpFieldPayloadSpecificCastItem cast_item;
RtcpFieldPayloadSpecificCastNackItem cast_nack_item;
+
+ RtcpFieldApplicationSpecificCastReceiverLogItem cast_receiver_log;
+ RtcpFieldApplicationSpecificCastSenderLogItem cast_sender_log;
};
enum RtcpFieldTypes {
@@ -187,10 +214,15 @@ enum RtcpFieldTypes {
kRtcpPayloadSpecificRpsiCode,
kRtcpPayloadSpecificAppCode,
+ // Application specific.
kRtcpPayloadSpecificRembCode,
kRtcpPayloadSpecificRembItemCode,
kRtcpPayloadSpecificCastCode,
kRtcpPayloadSpecificCastNackItemCode,
+ kRtcpApplicationSpecificCastReceiverLogCode,
+ kRtcpApplicationSpecificCastReceiverLogFrameCode,
+ kRtcpApplicationSpecificCastReceiverLogEventCode,
+ kRtcpApplicationSpecificCastSenderLogCode,
// RFC 5104.
kRtcpPayloadSpecificFirCode,
@@ -213,11 +245,11 @@ enum RtcpPacketTypes {
kPacketTypeInterArrivalJitterReport = 195,
kPacketTypeSenderReport = 200,
kPacketTypeReceiverReport = 201,
- kPacketTypeSdes= 202,
+ kPacketTypeSdes = 202,
kPacketTypeBye = 203,
kPacketTypeApplicationDefined = 204,
kPacketTypeGenericRtpFeedback = 205,
- kPacketTypePayloadSpecific = 206,
+ kPacketTypePayloadSpecific = 206,
kPacketTypeXr = 207,
kPacketTypeHigh = 210, // Port Mapping.
};
@@ -241,6 +273,9 @@ class RtcpParser {
kStateReportBlock, // Sender/Receiver report report blocks.
kStateSdes,
kStateBye,
+ kStateApplicationSpecificCastReceiverFrameLog,
+ kStateApplicationSpecificCastReceiverEventLog,
+ kStateApplicationSpecificCastSenderLog,
kStateExtendedReportBlock,
kStateExtendedReportDelaySinceLastReceiverReport,
kStateGenericRtpFeedbackNack,
@@ -260,6 +295,9 @@ class RtcpParser {
void IterateReportBlockItem();
void IterateSdesItem();
void IterateByeItem();
+ void IterateCastReceiverLogFrame();
+ void IterateCastReceiverLogEvent();
+ void IterateCastSenderLog();
void IterateExtendedReportItem();
void IterateExtendedReportDelaySinceLastReceiverReportItem();
void IterateNackItem();
@@ -282,6 +320,10 @@ class RtcpParser {
bool ParseSdesTypes();
bool ParseBye();
bool ParseByeItem();
+ bool ParseApplicationDefined(uint8 subtype);
+ bool ParseCastReceiverLogFrameItem();
+ bool ParseCastReceiverLogEventItem();
+ bool ParseCastSenderLogItem();
bool ParseExtendedReport();
bool ParseExtendedReportItem();
diff --git a/chromium/media/cast/rtcp/test_rtcp_packet_builder.cc b/chromium/media/cast/rtcp/test_rtcp_packet_builder.cc
index d6468e53a4e..f4117f53dec 100644
--- a/chromium/media/cast/rtcp/test_rtcp_packet_builder.cc
+++ b/chromium/media/cast/rtcp/test_rtcp_packet_builder.cc
@@ -25,6 +25,19 @@ void TestRtcpPacketBuilder::AddSr(uint32 sender_ssrc,
big_endian_writer_.WriteU32(kSendOctetCount);
}
+void TestRtcpPacketBuilder::AddSrWithNtp(uint32 sender_ssrc,
+ uint32 ntp_high,
+ uint32 ntp_low,
+ uint32 rtp_timestamp) {
+ AddRtcpHeader(200, 0);
+ big_endian_writer_.WriteU32(sender_ssrc);
+ big_endian_writer_.WriteU32(ntp_high);
+ big_endian_writer_.WriteU32(ntp_low);
+ big_endian_writer_.WriteU32(rtp_timestamp);
+ big_endian_writer_.WriteU32(kSendPacketCount);
+ big_endian_writer_.WriteU32(kSendOctetCount);
+}
+
void TestRtcpPacketBuilder::AddRr(uint32 sender_ssrc,
int number_of_report_blocks) {
AddRtcpHeader(201, number_of_report_blocks);
@@ -35,7 +48,7 @@ void TestRtcpPacketBuilder::AddRb(uint32 rtp_ssrc) {
big_endian_writer_.WriteU32(rtp_ssrc);
big_endian_writer_.WriteU32(kLoss);
big_endian_writer_.WriteU32(kExtendedMax);
- big_endian_writer_.WriteU32(kJitter);
+ big_endian_writer_.WriteU32(kTestJitter);
big_endian_writer_.WriteU32(kLastSr);
big_endian_writer_.WriteU32(kDelayLastSr);
}
@@ -45,7 +58,10 @@ void TestRtcpPacketBuilder::AddSdesCname(uint32 sender_ssrc,
AddRtcpHeader(202, 1);
big_endian_writer_.WriteU32(sender_ssrc);
big_endian_writer_.WriteU8(1); // c_name.
- big_endian_writer_.WriteU8(c_name.size()); // c_name length in bytes.
+
+ DCHECK_LE(c_name.size(), 255u);
+ big_endian_writer_.WriteU8(
+ static_cast<uint8>(c_name.size())); // c_name length in bytes.
for (size_t i = 0; i < c_name.size(); ++i) {
big_endian_writer_.WriteU8(c_name.c_str()[i]);
}
@@ -169,7 +185,7 @@ void TestRtcpPacketBuilder::AddRemb(uint32 sender_ssrc, uint32 media_ssrc) {
big_endian_writer_.WriteU8(1); // Number of SSRCs.
big_endian_writer_.WriteU8(1); // BR Exp.
// BR Mantissa.
- big_endian_writer_.WriteU16(static_cast<uint16>(kRembBitrate / 2));
+ big_endian_writer_.WriteU16(static_cast<uint16>(kTestRembBitrate / 2));
big_endian_writer_.WriteU32(media_ssrc);
}
@@ -195,6 +211,47 @@ void TestRtcpPacketBuilder::AddCast(uint32 sender_ssrc, uint32 media_ssrc) {
big_endian_writer_.WriteU8(0); // Lost packet id mask.
}
+void TestRtcpPacketBuilder::AddSenderLog(uint32 sender_ssrc) {
+ AddRtcpHeader(204, 1);
+ big_endian_writer_.WriteU32(sender_ssrc);
+ big_endian_writer_.WriteU8('C');
+ big_endian_writer_.WriteU8('A');
+ big_endian_writer_.WriteU8('S');
+ big_endian_writer_.WriteU8('T');
+}
+
+void TestRtcpPacketBuilder::AddSenderFrameLog(uint8 event_id,
+ uint32 rtp_timestamp) {
+ big_endian_writer_.WriteU32(
+ (static_cast<uint32>(event_id) << 24) + (rtp_timestamp & 0xffffff));
+}
+
+void TestRtcpPacketBuilder::AddReceiverLog(uint32 sender_ssrc) {
+ AddRtcpHeader(204, 2);
+ big_endian_writer_.WriteU32(sender_ssrc);
+ big_endian_writer_.WriteU8('C');
+ big_endian_writer_.WriteU8('A');
+ big_endian_writer_.WriteU8('S');
+ big_endian_writer_.WriteU8('T');
+}
+
+void TestRtcpPacketBuilder::AddReceiverFrameLog(uint32 rtp_timestamp,
+ int num_events, uint32 event_timesamp_base) {
+ big_endian_writer_.WriteU32(rtp_timestamp);
+ big_endian_writer_.WriteU8(static_cast<uint8>(num_events - 1));
+ big_endian_writer_.WriteU8(static_cast<uint8>(event_timesamp_base >> 16));
+ big_endian_writer_.WriteU8(static_cast<uint8>(event_timesamp_base >> 8));
+ big_endian_writer_.WriteU8(static_cast<uint8>(event_timesamp_base));
+}
+
+void TestRtcpPacketBuilder::AddReceiverEventLog(uint16 event_data,
+ uint8 event_id, uint16 event_timesamp_delta) {
+ big_endian_writer_.WriteU16(event_data);
+ uint16 type_and_delta = static_cast<uint16>(event_id) << 12;
+ type_and_delta += event_timesamp_delta & 0x0fff;
+ big_endian_writer_.WriteU16(type_and_delta);
+}
+
const uint8* TestRtcpPacketBuilder::Packet() {
PatchLengthField();
return buffer_;
diff --git a/chromium/media/cast/rtcp/test_rtcp_packet_builder.h b/chromium/media/cast/rtcp/test_rtcp_packet_builder.h
index be93f0adb3c..9b63a37fa4a 100644
--- a/chromium/media/cast/rtcp/test_rtcp_packet_builder.h
+++ b/chromium/media/cast/rtcp/test_rtcp_packet_builder.h
@@ -13,17 +13,20 @@
namespace media {
namespace cast {
+// These values are arbitrary only for the purpose of testing.
+
+namespace {
// Sender report.
static const int kNtpHigh = 0x01020304;
static const int kNtpLow = 0x05060708;
-static const int kRtpTimestamp = 0x10203;
+static const int kRtpTimestamp = 0x10203040;
static const int kSendPacketCount = 987;
static const int kSendOctetCount = 87654;
// Report block.
static const int kLoss = 0x01000123;
static const int kExtendedMax = 0x15678;
-static const int kJitter = 0x10203;
+static const int kTestJitter = 0x10203;
static const int kLastSr = 0x34561234;
static const int kDelayLastSr = 1000;
@@ -32,7 +35,7 @@ static const int kLastRr = 0x34561234;
static const int kDelayLastRr = 1000;
// REMB.
-static const int kRembBitrate = 524286;
+static const int kTestRembBitrate = 52428;
// RPSI.
static const int kPayloadtype = 126;
@@ -42,18 +45,21 @@ static const uint64 kPictureId = 0x1234567890;
static const int kMissingPacket = 34567;
// CAST.
-static const int kAckFrameId = 17;
-static const int kLostFrameId = 18;
-static const int kFrameIdWithLostPackets = 19;
+static const uint32 kAckFrameId = 17;
+static const uint32 kLostFrameId = 18;
+static const uint32 kFrameIdWithLostPackets = 19;
static const int kLostPacketId1 = 3;
static const int kLostPacketId2 = 5;
static const int kLostPacketId3 = 12;
+} // namespace
class TestRtcpPacketBuilder {
public:
TestRtcpPacketBuilder();
void AddSr(uint32 sender_ssrc, int number_of_report_blocks);
+ void AddSrWithNtp(uint32 sender_ssrc, uint32 ntp_high, uint32 ntp_low,
+ uint32 rtp_timestamp);
void AddRr(uint32 sender_ssrc, int number_of_report_blocks);
void AddRb(uint32 rtp_ssrc);
void AddSdesCname(uint32 sender_ssrc, const std::string& c_name);
@@ -71,6 +77,13 @@ class TestRtcpPacketBuilder {
void AddRpsi(uint32 sender_ssrc, uint32 media_ssrc);
void AddRemb(uint32 sender_ssrc, uint32 media_ssrc);
void AddCast(uint32 sender_ssrc, uint32 media_ssrc);
+ void AddSenderLog(uint32 sender_ssrc);
+ void AddSenderFrameLog(uint8 event_id, uint32 rtp_timestamp);
+ void AddReceiverLog(uint32 sender_ssrc);
+ void AddReceiverFrameLog(uint32 rtp_timestamp, int num_events,
+ uint32 event_timesamp_base);
+ void AddReceiverEventLog(uint16 event_data, uint8 event_id,
+ uint16 event_timesamp_delta);
const uint8* Packet();
int Length() { return kIpPacketSize - big_endian_writer_.remaining(); }
@@ -90,5 +103,3 @@ class TestRtcpPacketBuilder {
} // namespace media
#endif // MEDIA_CAST_RTCP_TEST_RTCP_PACKET_BUILDER_H_
-
-
diff --git a/chromium/media/cast/rtp_receiver/mock_rtp_payload_feedback.cc b/chromium/media/cast/rtp_receiver/mock_rtp_payload_feedback.cc
new file mode 100644
index 00000000000..8681d087aa3
--- /dev/null
+++ b/chromium/media/cast/rtp_receiver/mock_rtp_payload_feedback.cc
@@ -0,0 +1,17 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/rtp_receiver/mock_rtp_payload_feedback.h"
+
+namespace media {
+namespace cast {
+
+MockRtpPayloadFeedback::MockRtpPayloadFeedback() {
+}
+
+MockRtpPayloadFeedback::~MockRtpPayloadFeedback() {
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/rtp_common/mock_rtp_payload_feedback.h b/chromium/media/cast/rtp_receiver/mock_rtp_payload_feedback.h
index d962ff895c5..003b67bc0da 100644
--- a/chromium/media/cast/rtp_common/mock_rtp_payload_feedback.h
+++ b/chromium/media/cast/rtp_receiver/mock_rtp_payload_feedback.h
@@ -5,7 +5,7 @@
#ifndef MEDIA_CAST_RTP_COMMON_MOCK_RTP_PAYLOAD_FEEDBACK_H_
#define MEDIA_CAST_RTP_COMMON_MOCK_RTP_PAYLOAD_FEEDBACK_H_
-#include "media/cast/rtp_common/rtp_defines.h"
+#include "media/cast/rtp_receiver/rtp_receiver_defines.h"
#include "testing/gmock/include/gmock/gmock.h"
namespace media {
@@ -13,6 +13,9 @@ namespace cast {
class MockRtpPayloadFeedback : public RtpPayloadFeedback {
public:
+ MockRtpPayloadFeedback();
+ virtual ~MockRtpPayloadFeedback();
+
MOCK_METHOD1(CastFeedback,
void(const RtcpCastMessage& cast_feedback));
};
diff --git a/chromium/media/cast/rtp_receiver/receiver_stats.cc b/chromium/media/cast/rtp_receiver/receiver_stats.cc
index 44a9b810075..9d34583a769 100644
--- a/chromium/media/cast/rtp_receiver/receiver_stats.cc
+++ b/chromium/media/cast/rtp_receiver/receiver_stats.cc
@@ -5,24 +5,22 @@
#include "media/cast/rtp_receiver/receiver_stats.h"
#include "base/logging.h"
-#include "media/cast/rtp_common/rtp_defines.h"
+#include "media/cast/rtp_receiver/rtp_receiver_defines.h"
namespace media {
namespace cast {
static const uint32 kMaxSequenceNumber = 65536;
-ReceiverStats::ReceiverStats(uint32 ssrc)
- : ssrc_(ssrc),
+ReceiverStats::ReceiverStats(base::TickClock* clock)
+ : clock_(clock),
min_sequence_number_(0),
max_sequence_number_(0),
total_number_packets_(0),
sequence_number_cycles_(0),
interval_min_sequence_number_(0),
interval_number_packets_(0),
- interval_wrap_count_(0),
- default_tick_clock_(),
- clock_(&default_tick_clock_) {}
+ interval_wrap_count_(0) {}
ReceiverStats::~ReceiverStats() {}
@@ -66,7 +64,7 @@ void ReceiverStats::GetStatistics(uint8* fraction_lost,
*extended_high_sequence_number = (sequence_number_cycles_ << 16) +
max_sequence_number_;
- *jitter = static_cast<uint32>(abs(jitter_.InMilliseconds()));
+ *jitter = static_cast<uint32>(abs(jitter_.InMillisecondsRoundedUp()));
// Reset interval values.
interval_min_sequence_number_ = 0;
@@ -75,8 +73,6 @@ void ReceiverStats::GetStatistics(uint8* fraction_lost,
}
void ReceiverStats::UpdateStatistics(const RtpCastHeader& header) {
- if (ssrc_ != header.webrtc.header.ssrc) return;
-
uint16 new_seq_num = header.webrtc.header.sequenceNumber;
if (interval_number_packets_ == 0) {
@@ -105,7 +101,7 @@ void ReceiverStats::UpdateStatistics(const RtpCastHeader& header) {
if (total_number_packets_ > 0) {
// Update jitter.
base::TimeDelta delta = (now - last_received_packet_time_) -
- ((delta_new_timestamp - last_received_timestamp_) / 90000);
+ ((delta_new_timestamp - last_received_timestamp_) / 90);
jitter_ += (delta - jitter_) / 16;
}
last_received_timestamp_ = delta_new_timestamp;
diff --git a/chromium/media/cast/rtp_receiver/receiver_stats.h b/chromium/media/cast/rtp_receiver/receiver_stats.h
index 610f515c0e2..c91ee507e0c 100644
--- a/chromium/media/cast/rtp_receiver/receiver_stats.h
+++ b/chromium/media/cast/rtp_receiver/receiver_stats.h
@@ -5,30 +5,26 @@
#ifndef MEDIA_CAST_RTP_RECEIVER_RECEIVER_STATS_H_
#define MEDIA_CAST_RTP_RECEIVER_RECEIVER_STATS_H_
-#include "base/time/default_tick_clock.h"
#include "base/time/tick_clock.h"
#include "base/time/time.h"
-#include "media/cast/rtp_common/rtp_defines.h"
+#include "media/cast/rtp_receiver/rtp_receiver_defines.h"
namespace media {
namespace cast {
class ReceiverStats {
public:
- explicit ReceiverStats(uint32 ssrc);
+ explicit ReceiverStats(base::TickClock* clock);
~ReceiverStats();
+
void GetStatistics(uint8* fraction_lost,
uint32* cumulative_lost, // 24 bits valid.
uint32* extended_high_sequence_number,
uint32* jitter);
void UpdateStatistics(const RtpCastHeader& header);
- void set_clock(base::TickClock* clock) {
- clock_ = clock;
- }
-
private:
- const uint32 ssrc_;
+ base::TickClock* const clock_; // Not owned by this class.
// Global metrics.
uint16 min_sequence_number_;
@@ -43,8 +39,6 @@ class ReceiverStats {
int interval_min_sequence_number_;
int interval_number_packets_;
int interval_wrap_count_;
- base::DefaultTickClock default_tick_clock_;
- base::TickClock* clock_;
};
} // namespace cast
diff --git a/chromium/media/cast/rtp_receiver/receiver_stats_unittest.cc b/chromium/media/cast/rtp_receiver/receiver_stats_unittest.cc
index c6cf91ab072..2788cb592de 100644
--- a/chromium/media/cast/rtp_receiver/receiver_stats_unittest.cc
+++ b/chromium/media/cast/rtp_receiver/receiver_stats_unittest.cc
@@ -6,20 +6,19 @@
#include "base/test/simple_test_tick_clock.h"
#include "base/time/time.h"
-#include "media/cast/rtp_common/rtp_defines.h"
#include "media/cast/rtp_receiver/receiver_stats.h"
+#include "media/cast/rtp_receiver/rtp_receiver_defines.h"
namespace media {
namespace cast {
-static const int64 kStartMillisecond = 123456789;
+static const int64 kStartMillisecond = GG_INT64_C(12345678900000);
static const uint32 kStdTimeIncrementMs = 33;
-static const uint32 kSsrc = 0x1234;
class ReceiverStatsTest : public ::testing::Test {
protected:
ReceiverStatsTest()
- : stats_(kSsrc),
+ : stats_(&testing_clock_),
rtp_header_(),
fraction_lost_(0),
cumulative_lost_(0),
@@ -30,12 +29,11 @@ class ReceiverStatsTest : public ::testing::Test {
start_time_ = testing_clock_.NowTicks();
delta_increments_ = base::TimeDelta::FromMilliseconds(kStdTimeIncrementMs);
}
- ~ReceiverStatsTest() {}
+ virtual ~ReceiverStatsTest() {}
virtual void SetUp() {
rtp_header_.webrtc.header.sequenceNumber = 0;
rtp_header_.webrtc.header.timestamp = 0;
- rtp_header_.webrtc.header.ssrc = kSsrc;
}
uint32 ExpectedJitter(uint32 const_interval, int num_packets) {
@@ -49,11 +47,6 @@ class ReceiverStatsTest : public ::testing::Test {
return static_cast<uint32>(jitter + 0.5f);
}
- uint32 Timestamp() {
- base::TimeDelta delta = testing_clock_.NowTicks() - start_time_;
- return static_cast<uint32>(delta.InMilliseconds() * 90);
- }
-
ReceiverStats stats_;
RtpCastHeader rtp_header_;
uint8 fraction_lost_;
@@ -79,7 +72,7 @@ TEST_F(ReceiverStatsTest, LossCount) {
if (i % 4)
stats_.UpdateStatistics(rtp_header_);
if (i % 3) {
- rtp_header_.webrtc.header.timestamp = Timestamp();
+ rtp_header_.webrtc.header.timestamp += 33 * 90;
}
++rtp_header_.webrtc.header.sequenceNumber;
testing_clock_.Advance(delta_increments_);
@@ -98,7 +91,7 @@ TEST_F(ReceiverStatsTest, NoLossWrap) {
for (int i = 0; i < 300; ++i) {
stats_.UpdateStatistics(rtp_header_);
if (i % 3) {
- rtp_header_.webrtc.header.timestamp = Timestamp();
+ rtp_header_.webrtc.header.timestamp += 33 * 90;
}
++rtp_header_.webrtc.header.sequenceNumber;
testing_clock_.Advance(delta_increments_);
@@ -135,8 +128,7 @@ TEST_F(ReceiverStatsTest, LossCountWrap) {
EXPECT_EQ(extended_seq_num, extended_high_sequence_number_);
}
-TEST_F(ReceiverStatsTest, Jitter) {
- rtp_header_.webrtc.header.timestamp = Timestamp();
+TEST_F(ReceiverStatsTest, BasicJitter) {
for (int i = 0; i < 300; ++i) {
stats_.UpdateStatistics(rtp_header_);
++rtp_header_.webrtc.header.sequenceNumber;
@@ -153,5 +145,26 @@ TEST_F(ReceiverStatsTest, Jitter) {
EXPECT_EQ(ExpectedJitter(kStdTimeIncrementMs, 300), jitter_);
}
+TEST_F(ReceiverStatsTest, NonTrivialJitter) {
+ const int kAdditionalIncrement = 5;
+ for (int i = 0; i < 300; ++i) {
+ stats_.UpdateStatistics(rtp_header_);
+ ++rtp_header_.webrtc.header.sequenceNumber;
+ rtp_header_.webrtc.header.timestamp += 33 * 90;
+ base::TimeDelta additional_delta =
+ base::TimeDelta::FromMilliseconds(kAdditionalIncrement);
+ testing_clock_.Advance(delta_increments_ + additional_delta);
+ }
+ stats_.GetStatistics(&fraction_lost_, &cumulative_lost_,
+ &extended_high_sequence_number_, &jitter_);
+ EXPECT_FALSE(fraction_lost_);
+ EXPECT_FALSE(cumulative_lost_);
+ // Build extended sequence number (one wrap cycle).
+ uint32 extended_seq_num = rtp_header_.webrtc.header.sequenceNumber - 1;
+ EXPECT_EQ(extended_seq_num, extended_high_sequence_number_);
+ EXPECT_EQ(
+ ExpectedJitter(kStdTimeIncrementMs + kAdditionalIncrement, 300), jitter_);
+}
+
} // namespace cast
} // namespace media
diff --git a/chromium/media/cast/rtp_receiver/rtp_parser/include/mock/mock_rtp_feedback.h b/chromium/media/cast/rtp_receiver/rtp_parser/include/mock/mock_rtp_feedback.h
index d39bc2a2559..b6647a835be 100644
--- a/chromium/media/cast/rtp_receiver/rtp_parser/include/mock/mock_rtp_feedback.h
+++ b/chromium/media/cast/rtp_receiver/rtp_parser/include/mock/mock_rtp_feedback.h
@@ -34,4 +34,4 @@ class MockRtpFeedback : public RtpFeedback {
} // namespace cast
} // namespace media
-#endif // MEDIA_CAST_RTP_INCLUDE_MOCK_RTP_FEEDBACK_H_ \ No newline at end of file
+#endif // MEDIA_CAST_RTP_INCLUDE_MOCK_RTP_FEEDBACK_H_
diff --git a/chromium/media/cast/rtp_receiver/rtp_parser/rtp_parser.cc b/chromium/media/cast/rtp_receiver/rtp_parser/rtp_parser.cc
index 0eb691be7af..6ef20fe64e3 100644
--- a/chromium/media/cast/rtp_receiver/rtp_parser/rtp_parser.cc
+++ b/chromium/media/cast/rtp_receiver/rtp_parser/rtp_parser.cc
@@ -12,21 +12,19 @@
namespace media {
namespace cast {
-static const int kRtpCommonHeaderLength = 12;
-static const int kRtpCastHeaderLength = 7;
+static const size_t kRtpCommonHeaderLength = 12;
+static const size_t kRtpCastHeaderLength = 7;
static const uint8 kCastKeyFrameBitMask = 0x80;
static const uint8 kCastReferenceFrameIdBitMask = 0x40;
RtpParser::RtpParser(RtpData* incoming_payload_callback,
const RtpParserConfig parser_config)
: data_callback_(incoming_payload_callback),
- parser_config_(parser_config) {
-}
+ parser_config_(parser_config) {}
-RtpParser::~RtpParser() {
-}
+RtpParser::~RtpParser() {}
-bool RtpParser::ParsePacket(const uint8* packet, int length,
+bool RtpParser::ParsePacket(const uint8* packet, size_t length,
RtpCastHeader* rtp_header) {
if (length == 0) return false;
// Get RTP general header.
@@ -41,7 +39,7 @@ bool RtpParser::ParsePacket(const uint8* packet, int length,
}
bool RtpParser::ParseCommon(const uint8* packet,
- int length,
+ size_t length,
RtpCastHeader* rtp_header) {
if (length < kRtpCommonHeaderLength) return false;
uint8 version = packet[0] >> 6;
@@ -52,11 +50,13 @@ bool RtpParser::ParseCommon(const uint8* packet,
uint16 sequence_number;
uint32 rtp_timestamp, ssrc;
- net::BigEndianReader big_endian_reader(packet + 2, 80);
+ net::BigEndianReader big_endian_reader(packet + 2, 10);
big_endian_reader.ReadU16(&sequence_number);
big_endian_reader.ReadU32(&rtp_timestamp);
big_endian_reader.ReadU32(&ssrc);
+ if (ssrc != parser_config_.ssrc) return false;
+
rtp_header->webrtc.header.markerBit = marker;
rtp_header->webrtc.header.payloadType = payload_type;
rtp_header->webrtc.header.sequenceNumber = sequence_number;
@@ -69,26 +69,29 @@ bool RtpParser::ParseCommon(const uint8* packet,
rtp_header->webrtc.header.headerLength = kRtpCommonHeaderLength + csrc_octs;
rtp_header->webrtc.type.Audio.isCNG = false;
rtp_header->webrtc.type.Audio.channel = parser_config_.audio_channels;
+ // TODO(pwestin): look at x bit and skip data.
return true;
}
bool RtpParser::ParseCast(const uint8* packet,
- int length,
+ size_t length,
RtpCastHeader* rtp_header) {
if (length < kRtpCastHeaderLength) return false;
+
// Extract header.
const uint8* data_ptr = packet;
- int data_length = length;
+ size_t data_length = length;
rtp_header->is_key_frame = (data_ptr[0] & kCastKeyFrameBitMask);
rtp_header->is_reference = (data_ptr[0] & kCastReferenceFrameIdBitMask);
- rtp_header->frame_id = data_ptr[1];
+ rtp_header->frame_id = frame_id_wrap_helper_.MapTo32bitsFrameId(data_ptr[1]);
- net::BigEndianReader big_endian_reader(data_ptr + 2, 32);
+ net::BigEndianReader big_endian_reader(data_ptr + 2, 4);
big_endian_reader.ReadU16(&rtp_header->packet_id);
big_endian_reader.ReadU16(&rtp_header->max_packet_id);
if (rtp_header->is_reference) {
- rtp_header->reference_frame_id = data_ptr[6];
+ rtp_header->reference_frame_id =
+ reference_frame_id_wrap_helper_.MapTo32bitsFrameId(data_ptr[6]);
data_ptr += kRtpCastHeaderLength;
data_length -= kRtpCastHeaderLength;
} else {
@@ -96,9 +99,8 @@ bool RtpParser::ParseCast(const uint8* packet,
data_length -= kRtpCastHeaderLength - 1;
}
- if (rtp_header->max_packet_id < rtp_header->packet_id) {
- return false;
- }
+ if (rtp_header->max_packet_id < rtp_header->packet_id) return false;
+
data_callback_->OnReceivedPayloadData(data_ptr, data_length, rtp_header);
return true;
}
diff --git a/chromium/media/cast/rtp_receiver/rtp_parser/rtp_parser.gypi b/chromium/media/cast/rtp_receiver/rtp_parser/rtp_parser.gyp
index 0814e55cf81..258b0bff532 100644
--- a/chromium/media/cast/rtp_receiver/rtp_parser/rtp_parser.gypi
+++ b/chromium/media/cast/rtp_receiver/rtp_parser/rtp_parser.gyp
@@ -12,13 +12,11 @@
'<(DEPTH)/third_party/',
],
'sources': [
- 'rtp_parser_config.h',
'rtp_parser.cc',
'rtp_parser.h',
], # source
'dependencies': [
'<(DEPTH)/base/base.gyp:base',
- '<(DEPTH)/base/base.gyp:test_support_base',
],
},
],
diff --git a/chromium/media/cast/rtp_receiver/rtp_parser/rtp_parser.h b/chromium/media/cast/rtp_receiver/rtp_parser/rtp_parser.h
index 7f85609bf6d..33bc92a6e6e 100644
--- a/chromium/media/cast/rtp_receiver/rtp_parser/rtp_parser.h
+++ b/chromium/media/cast/rtp_receiver/rtp_parser/rtp_parser.h
@@ -5,7 +5,8 @@
#ifndef MEDIA_CAST_RTP_RECEIVER_RTP_PARSER_RTP_PARSER_H_
#define MEDIA_CAST_RTP_RECEIVER_RTP_PARSER_RTP_PARSER_H_
-#include "media/cast/rtp_common/rtp_defines.h"
+#include "media/cast/net/cast_net_defines.h"
+#include "media/cast/rtp_receiver/rtp_receiver_defines.h"
namespace media {
namespace cast {
@@ -33,18 +34,20 @@ class RtpParser {
~RtpParser();
- bool ParsePacket(const uint8* packet, int length,
+ bool ParsePacket(const uint8* packet, size_t length,
RtpCastHeader* rtp_header);
private:
- bool ParseCommon(const uint8* packet, int length,
+ bool ParseCommon(const uint8* packet, size_t length,
RtpCastHeader* rtp_header);
- bool ParseCast(const uint8* packet, int length,
+ bool ParseCast(const uint8* packet, size_t length,
RtpCastHeader* rtp_header);
RtpData* data_callback_;
RtpParserConfig parser_config_;
+ FrameIdWrapHelper frame_id_wrap_helper_;
+ FrameIdWrapHelper reference_frame_id_wrap_helper_;
};
} // namespace cast
diff --git a/chromium/media/cast/rtp_receiver/rtp_parser/rtp_parser_unittest.cc b/chromium/media/cast/rtp_receiver/rtp_parser/rtp_parser_unittest.cc
index 71e6f501a52..c0f91d10fff 100644
--- a/chromium/media/cast/rtp_receiver/rtp_parser/rtp_parser_unittest.cc
+++ b/chromium/media/cast/rtp_receiver/rtp_parser/rtp_parser_unittest.cc
@@ -5,16 +5,15 @@
#include <gtest/gtest.h>
#include "base/memory/scoped_ptr.h"
-#include "media/cast/rtp_common/rtp_defines.h"
#include "media/cast/rtp_receiver/rtp_parser/rtp_parser.h"
#include "media/cast/rtp_receiver/rtp_parser/test/rtp_packet_builder.h"
#include "media/cast/rtp_receiver/rtp_receiver.h"
+#include "media/cast/rtp_receiver/rtp_receiver_defines.h"
namespace media {
namespace cast {
-static const int kPacketLength = 1500;
-static const int kCastRtpLength = 7;
+static const size_t kPacketLength = 1500;
static const int kTestPayloadType = 127;
static const uint32 kTestSsrc = 1234;
static const uint32 kTestTimestamp = 111111;
@@ -27,18 +26,17 @@ class RtpDataTest : public RtpData {
expected_header_.reset(new RtpCastHeader());
}
- ~RtpDataTest() {}
+ virtual ~RtpDataTest() {}
void SetExpectedHeader(const RtpCastHeader& cast_header) {
memcpy(expected_header_.get(), &cast_header, sizeof(RtpCastHeader));
}
- void OnReceivedPayloadData(const uint8* payloadData,
- int payloadSize,
- const RtpCastHeader* rtpHeader) {
+ virtual void OnReceivedPayloadData(const uint8* payloadData,
+ size_t payloadSize,
+ const RtpCastHeader* rtpHeader) OVERRIDE {
VerifyCommonHeader(*rtpHeader);
VerifyCastHeader(*rtpHeader);
- // TODO(mikhal): Add data verification.
}
void VerifyCommonHeader(const RtpCastHeader& parsed_header) {
@@ -69,10 +67,9 @@ class RtpParserTest : public ::testing::Test {
rtp_parser_.reset(new RtpParser(rtp_data_.get(), config_));
}
- ~RtpParserTest() {}
+ virtual ~RtpParserTest() {}
virtual void SetUp() {
- cast_header_.InitRTPVideoHeaderCast();
cast_header_.is_reference = true;
cast_header_.reference_frame_id = kRefFrameId;
packet_builder_.SetSsrc(kTestSsrc);
diff --git a/chromium/media/cast/rtp_receiver/rtp_receiver.cc b/chromium/media/cast/rtp_receiver/rtp_receiver.cc
index 97e9b03032c..3c804d9bd9b 100644
--- a/chromium/media/cast/rtp_receiver/rtp_receiver.cc
+++ b/chromium/media/cast/rtp_receiver/rtp_receiver.cc
@@ -5,18 +5,21 @@
#include "media/cast/rtp_receiver/rtp_receiver.h"
#include "base/logging.h"
-#include "media/cast/rtp_common/rtp_defines.h"
#include "media/cast/rtp_receiver/receiver_stats.h"
#include "media/cast/rtp_receiver/rtp_parser/rtp_parser.h"
+#include "media/cast/rtp_receiver/rtp_receiver_defines.h"
+#include "net/base/big_endian.h"
namespace media {
namespace cast {
-RtpReceiver::RtpReceiver(const AudioReceiverConfig* audio_config,
+RtpReceiver::RtpReceiver(base::TickClock* clock,
+ const AudioReceiverConfig* audio_config,
const VideoReceiverConfig* video_config,
RtpData* incoming_payload_callback) {
DCHECK(incoming_payload_callback) << "Invalid argument";
DCHECK(audio_config || video_config) << "Invalid argument";
+
// Configure parser.
RtpParserConfig config;
if (audio_config) {
@@ -29,13 +32,23 @@ RtpReceiver::RtpReceiver(const AudioReceiverConfig* audio_config,
config.payload_type = video_config->rtp_payload_type;
config.video_codec = video_config->codec;
}
- stats_.reset(new ReceiverStats(config.ssrc));
+ stats_.reset(new ReceiverStats(clock));
parser_.reset(new RtpParser(incoming_payload_callback, config));
}
RtpReceiver::~RtpReceiver() {}
-bool RtpReceiver::ReceivedPacket(const uint8* packet, int length) {
+// static
+uint32 RtpReceiver::GetSsrcOfSender(const uint8* rtcp_buffer, size_t length) {
+ DCHECK_GE(length, kMinLengthOfRtp) << "Invalid RTP packet";
+ uint32 ssrc_of_sender;
+ net::BigEndianReader big_endian_reader(rtcp_buffer, length);
+ big_endian_reader.Skip(8); // Skip header
+ big_endian_reader.ReadU32(&ssrc_of_sender);
+ return ssrc_of_sender;
+}
+
+bool RtpReceiver::ReceivedPacket(const uint8* packet, size_t length) {
RtpCastHeader rtp_header;
if (!parser_->ParsePacket(packet, length, &rtp_header)) return false;
diff --git a/chromium/media/cast/rtp_receiver/rtp_receiver.gyp b/chromium/media/cast/rtp_receiver/rtp_receiver.gyp
index c1d4d5adf05..b612964c070 100644
--- a/chromium/media/cast/rtp_receiver/rtp_receiver.gyp
+++ b/chromium/media/cast/rtp_receiver/rtp_receiver.gyp
@@ -19,8 +19,7 @@
], # source
'dependencies': [
'<(DEPTH)/base/base.gyp:base',
- '<(DEPTH)/base/base.gyp:test_support_base',
- 'rtp_parser/rtp_parser.gypi:*',
+ 'rtp_parser/rtp_parser.gyp:*',
],
},
],
diff --git a/chromium/media/cast/rtp_receiver/rtp_receiver.h b/chromium/media/cast/rtp_receiver/rtp_receiver.h
index 6cac8cadd70..5639d7d8c36 100644
--- a/chromium/media/cast/rtp_receiver/rtp_receiver.h
+++ b/chromium/media/cast/rtp_receiver/rtp_receiver.h
@@ -10,7 +10,7 @@
#include "base/memory/scoped_ptr.h"
#include "media/cast/cast_config.h"
#include "media/cast/rtcp/rtcp.h"
-#include "media/cast/rtp_common/rtp_defines.h"
+#include "media/cast/rtp_receiver/rtp_receiver_defines.h"
namespace media {
namespace cast {
@@ -18,7 +18,7 @@ namespace cast {
class RtpData {
public:
virtual void OnReceivedPayloadData(const uint8* payload_data,
- int payload_size,
+ size_t payload_size,
const RtpCastHeader* rtp_header) = 0;
protected:
@@ -30,12 +30,15 @@ class RtpParser;
class RtpReceiver {
public:
- RtpReceiver(const AudioReceiverConfig* audio_config,
+ RtpReceiver(base::TickClock* clock,
+ const AudioReceiverConfig* audio_config,
const VideoReceiverConfig* video_config,
RtpData* incoming_payload_callback);
~RtpReceiver();
- bool ReceivedPacket(const uint8* packet, int length);
+ static uint32 GetSsrcOfSender(const uint8* rtcp_buffer, size_t length);
+
+ bool ReceivedPacket(const uint8* packet, size_t length);
void GetStatistics(uint8* fraction_lost,
uint32* cumulative_lost, // 24 bits valid.
diff --git a/chromium/media/cast/rtp_common/rtp_defines.h b/chromium/media/cast/rtp_receiver/rtp_receiver_defines.h
index dc64c360340..ae957e3ae6b 100644
--- a/chromium/media/cast/rtp_common/rtp_defines.h
+++ b/chromium/media/cast/rtp_receiver/rtp_receiver_defines.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_CAST_RTP_COMMON_RTP_DEFINES_H_
-#define MEDIA_CAST_RTP_COMMON_RTP_DEFINES_H_
+#ifndef MEDIA_CAST_RTP_RECEIVER_RTP_RECEIVER_DEFINES_H_
+#define MEDIA_CAST_RTP_RECEIVER_RTP_RECEIVER_DEFINES_H_
#include "base/basictypes.h"
#include "media/cast/cast_config.h"
@@ -16,7 +16,7 @@ namespace cast {
const uint8 kRtpMarkerBitMask = 0x80;
struct RtpCastHeader {
- void InitRTPVideoHeaderCast() {
+ RtpCastHeader() {
is_key_frame = false;
frame_id = 0;
packet_id = 0;
@@ -26,12 +26,12 @@ struct RtpCastHeader {
}
webrtc::WebRtcRTPHeader webrtc;
bool is_key_frame;
- uint8 frame_id;
+ uint32 frame_id;
uint16 packet_id;
uint16 max_packet_id;
bool is_reference; // Set to true if the previous frame is not available,
// and the reference frame id is available.
- uint8 reference_frame_id;
+ uint32 reference_frame_id;
};
class RtpPayloadFeedback {
@@ -45,4 +45,4 @@ class RtpPayloadFeedback {
} // namespace cast
} // namespace media
-#endif // MEDIA_CAST_RTP_COMMON_RTP_DEFINES_H_
+#endif // MEDIA_CAST_RTP_RECEIVER_RTP_RECEIVER_DEFINES_H_
diff --git a/chromium/media/cast/test/transport/transport.gyp b/chromium/media/cast/test/transport/transport.gyp
new file mode 100644
index 00000000000..79be3d28e6d
--- /dev/null
+++ b/chromium/media/cast/test/transport/transport.gyp
@@ -0,0 +1,22 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'cast_transport',
+ 'type': 'static_library',
+ 'include_dirs': [
+ '<(DEPTH)/',
+ ],
+ 'sources': [
+ 'transport.cc',
+ 'transport.h',
+ ], # source
+ 'dependencies': [
+ '<(DEPTH)/net/net.gyp:net',
+ ],
+ },
+ ],
+} \ No newline at end of file
diff --git a/chromium/media/cast/test/utility/utility.gyp b/chromium/media/cast/test/utility/utility.gyp
new file mode 100644
index 00000000000..021c2d9a416
--- /dev/null
+++ b/chromium/media/cast/test/utility/utility.gyp
@@ -0,0 +1,28 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'cast_test_utility',
+ 'type': 'static_library',
+ 'include_dirs': [
+ '<(DEPTH)/',
+ ],
+ 'dependencies': [
+ '<(DEPTH)/ui/gfx/gfx.gyp:gfx',
+ '<(DEPTH)/testing/gtest.gyp:gtest',
+ '<(DEPTH)/third_party/libyuv/libyuv.gyp:libyuv',
+
+ ],
+ 'sources': [
+ 'input_helper.cc',
+ 'input_helper.h',
+ '<(DEPTH)/media/cast/test/audio_utility.cc',
+ '<(DEPTH)/media/cast/test/fake_task_runner.cc',
+ '<(DEPTH)/media/cast/test/video_utility.cc',
+ ], # source
+ },
+ ],
+} \ No newline at end of file
diff --git a/chromium/media/cast/video_receiver/codecs/vp8/vp8_decoder.cc b/chromium/media/cast/video_receiver/codecs/vp8/vp8_decoder.cc
index 93d3eb5c4aa..10fcb85d36e 100644
--- a/chromium/media/cast/video_receiver/codecs/vp8/vp8_decoder.cc
+++ b/chromium/media/cast/video_receiver/codecs/vp8/vp8_decoder.cc
@@ -4,22 +4,38 @@
#include "media/cast/video_receiver/codecs/vp8/vp8_decoder.h"
+#include "base/bind.h"
#include "base/logging.h"
+#include "base/message_loop/message_loop.h"
+#include "media/base/video_frame.h"
+#include "media/base/video_util.h"
#include "third_party/libvpx/source/libvpx/vpx/vp8dx.h"
+#include "ui/gfx/size.h"
namespace media {
namespace cast {
-Vp8Decoder::Vp8Decoder(int number_of_cores) {
- decoder_.reset(new vpx_dec_ctx_t());
- InitDecode(number_of_cores);
+void LogFrameDecodedEvent(CastEnvironment* const cast_environment,
+ uint32 frame_id) {
+// TODO(mikhal): Sort out passing of rtp_timestamp.
+// cast_environment->Logging()->InsertFrameEvent(kVideoFrameDecoded,
+// 0, frame_id);
+}
+
+Vp8Decoder::Vp8Decoder(scoped_refptr<CastEnvironment> cast_environment)
+ : decoder_(new vpx_dec_ctx_t()),
+ cast_environment_(cast_environment) {
+ // Make sure that we initialize the decoder from the correct thread.
+ cast_environment_->PostTask(CastEnvironment::VIDEO_DECODER, FROM_HERE,
+ base::Bind(&Vp8Decoder::InitDecoder, base::Unretained(this)));
}
Vp8Decoder::~Vp8Decoder() {}
-void Vp8Decoder::InitDecode(int number_of_cores) {
- vpx_codec_dec_cfg_t cfg;
- cfg.threads = number_of_cores;
+void Vp8Decoder::InitDecoder() {
+ vpx_codec_dec_cfg_t cfg;
+ // Initializing to use one core.
+ cfg.threads = 1;
vpx_codec_flags_t flags = VPX_CODEC_USE_POSTPROC;
if (vpx_codec_dec_init(decoder_.get(), vpx_codec_vp8_dx(), &cfg, flags)) {
@@ -27,38 +43,60 @@ void Vp8Decoder::InitDecode(int number_of_cores) {
}
}
-bool Vp8Decoder::Decode(const EncodedVideoFrame& input_image,
- I420VideoFrame* decoded_frame) {
- if (input_image.data.empty()) return false;
+bool Vp8Decoder::Decode(const EncodedVideoFrame* encoded_frame,
+ const base::TimeTicks render_time,
+ const VideoFrameDecodedCallback& frame_decoded_cb) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::VIDEO_DECODER));
+ const int frame_id_int = static_cast<int>(encoded_frame->frame_id);
+ VLOG(1) << "VP8 decode frame:" << frame_id_int
+ << " sized:" << encoded_frame->data.size();
+
+ if (encoded_frame->data.empty()) return false;
vpx_codec_iter_t iter = NULL;
vpx_image_t* img;
- if (vpx_codec_decode(decoder_.get(),
- input_image.data.data(),
- input_image.data.size(),
- 0,
- 1 /* real time*/)) {
+ if (vpx_codec_decode(
+ decoder_.get(),
+ reinterpret_cast<const uint8*>(encoded_frame->data.data()),
+ static_cast<unsigned int>(encoded_frame->data.size()),
+ 0,
+ 1 /* real time*/)) {
+ VLOG(1) << "Failed to decode VP8 frame.";
return false;
}
img = vpx_codec_get_frame(decoder_.get(), &iter);
- if (img == NULL) return false;
-
- // Populate the decoded image.
- decoded_frame->width = img->d_w;
- decoded_frame->height = img->d_h;
-
- decoded_frame->y_plane.stride = img->stride[VPX_PLANE_Y];
- decoded_frame->y_plane.length = img->stride[VPX_PLANE_Y] * img->d_h;
- decoded_frame->y_plane.data = img->planes[VPX_PLANE_Y];
-
- decoded_frame->u_plane.stride = img->stride[VPX_PLANE_U];
- decoded_frame->u_plane.length = img->stride[VPX_PLANE_U] * img->d_h;
- decoded_frame->u_plane.data = img->planes[VPX_PLANE_U];
+ if (img == NULL) {
+ VLOG(1) << "Skip rendering VP8 frame:" << frame_id_int;
+ return false;
+ }
- decoded_frame->v_plane.stride = img->stride[VPX_PLANE_V];
- decoded_frame->v_plane.length = img->stride[VPX_PLANE_V] * img->d_h;
- decoded_frame->v_plane.data = img->planes[VPX_PLANE_V];
+ gfx::Size visible_size(img->d_w, img->d_h);
+ gfx::Size full_size(img->stride[VPX_PLANE_Y], img->d_h);
+ DCHECK(VideoFrame::IsValidConfig(VideoFrame::I420, visible_size,
+ gfx::Rect(visible_size), full_size));
+ // Temp timing setting - will sort out timing in a follow up cl.
+ scoped_refptr<VideoFrame> decoded_frame =
+ VideoFrame::CreateFrame(VideoFrame::I420, visible_size,
+ gfx::Rect(visible_size), full_size, base::TimeDelta());
+
+ // Copy each plane individually (need to account for stride).
+ // TODO(mikhal): Eliminate copy once http://crbug.com/321856 is resolved.
+ CopyPlane(VideoFrame::kYPlane, img->planes[VPX_PLANE_Y],
+ img->stride[VPX_PLANE_Y], img->d_h, decoded_frame.get());
+ CopyPlane(VideoFrame::kUPlane, img->planes[VPX_PLANE_U],
+ img->stride[VPX_PLANE_U], (img->d_h + 1) / 2, decoded_frame.get());
+ CopyPlane(VideoFrame::kVPlane, img->planes[VPX_PLANE_V],
+ img->stride[VPX_PLANE_V], (img->d_h + 1) / 2, decoded_frame.get());
+
+ // Log:: Decoding complete (should be called from the main thread).
+ cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE, base::Bind(
+ LogFrameDecodedEvent, cast_environment_,encoded_frame->frame_id));
+
+ VLOG(1) << "Decoded frame " << frame_id_int;
+ // Frame decoded - return frame to the user via callback.
+ cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
+ base::Bind(frame_decoded_cb, decoded_frame, render_time));
return true;
}
diff --git a/chromium/media/cast/video_receiver/codecs/vp8/vp8_decoder.gyp b/chromium/media/cast/video_receiver/codecs/vp8/vp8_decoder.gyp
index bed02c8454d..4bc9434d2d2 100644
--- a/chromium/media/cast/video_receiver/codecs/vp8/vp8_decoder.gyp
+++ b/chromium/media/cast/video_receiver/codecs/vp8/vp8_decoder.gyp
@@ -19,7 +19,6 @@
'dependencies': [
'<(DEPTH)/base/base.gyp:base',
'<(DEPTH)/third_party/libvpx/libvpx.gyp:libvpx',
- '<(DEPTH)/base/base.gyp:test_support_base',
],
},
],
diff --git a/chromium/media/cast/video_receiver/codecs/vp8/vp8_decoder.h b/chromium/media/cast/video_receiver/codecs/vp8/vp8_decoder.h
index 1acdb5a3d30..6a93c41abc9 100644
--- a/chromium/media/cast/video_receiver/codecs/vp8/vp8_decoder.h
+++ b/chromium/media/cast/video_receiver/codecs/vp8/vp8_decoder.h
@@ -6,29 +6,38 @@
#define MEDIA_CAST_RTP_RECEVIER_CODECS_VP8_VP8_DECODER_H_
#include "base/memory/scoped_ptr.h"
+#include "base/threading/non_thread_safe.h"
#include "media/cast/cast_config.h"
+#include "media/cast/cast_environment.h"
+#include "media/cast/cast_receiver.h"
#include "third_party/libvpx/source/libvpx/vpx/vpx_decoder.h"
typedef struct vpx_codec_ctx vpx_dec_ctx_t;
+// TODO(mikhal): Look into reusing VpxVideoDecoder.
namespace media {
namespace cast {
-class Vp8Decoder {
+// This class is not thread safe; it's only called from the cast video decoder
+// thread.
+class Vp8Decoder : public base::NonThreadSafe {
public:
- explicit Vp8Decoder(int number_of_cores);
-
+ explicit Vp8Decoder(scoped_refptr<CastEnvironment> cast_environment);
~Vp8Decoder();
- // Initialize the decoder.
- void InitDecode(int number_of_cores);
-
- // Decode encoded image (as a part of a video stream).
- bool Decode(const EncodedVideoFrame& input_image,
- I420VideoFrame* decoded_frame);
+ // Decode frame - The decoded frame will be passed via the callback.
+ // Will return false in case of error, and then it's up to the caller to
+ // release the memory.
+ // Ownership of the encoded_frame does not pass to the Vp8Decoder.
+ bool Decode(const EncodedVideoFrame* encoded_frame,
+ const base::TimeTicks render_time,
+ const VideoFrameDecodedCallback& frame_decoded_cb);
private:
+ // Initialize the decoder.
+ void InitDecoder();
scoped_ptr<vpx_dec_ctx_t> decoder_;
+ scoped_refptr<CastEnvironment> cast_environment_;
};
} // namespace cast
diff --git a/chromium/media/cast/video_receiver/video_decoder.cc b/chromium/media/cast/video_receiver/video_decoder.cc
index 238d6db0aba..360cdaa36e9 100644
--- a/chromium/media/cast/video_receiver/video_decoder.cc
+++ b/chromium/media/cast/video_receiver/video_decoder.cc
@@ -12,15 +12,13 @@
namespace media {
namespace cast {
-VideoDecoder::VideoDecoder(scoped_refptr<CastThread> cast_thread,
- const VideoReceiverConfig& video_config)
- : cast_thread_(cast_thread),
- codec_(video_config.codec),
+VideoDecoder::VideoDecoder(const VideoReceiverConfig& video_config,
+ scoped_refptr<CastEnvironment> cast_environment)
+ : codec_(video_config.codec),
vp8_decoder_() {
switch (video_config.codec) {
case kVp8:
- // Initializing to use one core.
- vp8_decoder_.reset(new Vp8Decoder(1));
+ vp8_decoder_.reset(new Vp8Decoder(cast_environment));
break;
case kH264:
NOTIMPLEMENTED();
@@ -33,33 +31,13 @@ VideoDecoder::VideoDecoder(scoped_refptr<CastThread> cast_thread,
VideoDecoder::~VideoDecoder() {}
-void VideoDecoder::DecodeVideoFrame(
- const EncodedVideoFrame* encoded_frame,
- const base::TimeTicks render_time,
- const VideoFrameDecodedCallback& frame_decoded_callback,
- base::Closure frame_release_callback) {
- DecodeFrame(encoded_frame, render_time, frame_decoded_callback);
- // Done with the frame -> release.
- cast_thread_->PostTask(CastThread::MAIN, FROM_HERE, frame_release_callback);
-}
-
-void VideoDecoder::DecodeFrame(
- const EncodedVideoFrame* encoded_frame,
- const base::TimeTicks render_time,
- const VideoFrameDecodedCallback& frame_decoded_callback) {
+bool VideoDecoder::DecodeVideoFrame(const EncodedVideoFrame* encoded_frame,
+ const base::TimeTicks render_time,
+ const VideoFrameDecodedCallback&
+ frame_decoded_cb) {
DCHECK(encoded_frame->codec == codec_) << "Invalid codec";
- // TODO(mikhal): Allow the application to allocate this memory.
- scoped_ptr<I420VideoFrame> video_frame(new I420VideoFrame());
-
- if (encoded_frame->data.size() > 0) {
- bool success = vp8_decoder_->Decode(*encoded_frame, video_frame.get());
- // Frame decoded - return frame to the user via callback.
- if (success) {
- cast_thread_->PostTask(CastThread::MAIN, FROM_HERE,
- base::Bind(frame_decoded_callback,
- base::Passed(&video_frame), render_time));
- }
- }
+ DCHECK_GT(encoded_frame->data.size(), GG_UINT64_C(0)) << "Empty video frame";
+ return vp8_decoder_->Decode(encoded_frame, render_time, frame_decoded_cb);
}
} // namespace cast
diff --git a/chromium/media/cast/video_receiver/video_decoder.h b/chromium/media/cast/video_receiver/video_decoder.h
index abf1955eb99..97a8a62cc70 100644
--- a/chromium/media/cast/video_receiver/video_decoder.h
+++ b/chromium/media/cast/video_receiver/video_decoder.h
@@ -5,39 +5,34 @@
#ifndef MEDIA_CAST_VIDEO_RECEIVER_VIDEO_DECODER_H_
#define MEDIA_CAST_VIDEO_RECEIVER_VIDEO_DECODER_H_
-#include "base/callback.h"
-#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
+#include "base/threading/non_thread_safe.h"
#include "media/cast/cast_config.h"
#include "media/cast/cast_receiver.h"
-#include "media/cast/cast_thread.h"
namespace media {
namespace cast {
class Vp8Decoder;
+class VideoFrame;
-class VideoDecoder : public base::RefCountedThreadSafe<VideoDecoder>{
+// This class is not thread safe; it's only called from the cast video decoder
+// thread.
+class VideoDecoder : public base::NonThreadSafe {
public:
- VideoDecoder(scoped_refptr<CastThread> cast_thread,
- const VideoReceiverConfig& video_config);
- ~VideoDecoder();
+ VideoDecoder(const VideoReceiverConfig& video_config,
+ scoped_refptr<CastEnvironment> cast_environment);
+ virtual ~VideoDecoder();
-
- // Decode a video frame. Decoded (raw) frame will be returned in the
- // frame_decoded_callback.
- void DecodeVideoFrame(const EncodedVideoFrame* encoded_frame,
+ // Decode a video frame. Decoded (raw) frame will be returned via the
+ // provided callback
+ bool DecodeVideoFrame(const EncodedVideoFrame* encoded_frame,
const base::TimeTicks render_time,
- const VideoFrameDecodedCallback& frame_decoded_callback,
- base::Closure frame_release_callback);
+ const VideoFrameDecodedCallback& frame_decoded_cb);
private:
- void DecodeFrame(const EncodedVideoFrame* encoded_frame,
- const base::TimeTicks render_time,
- const VideoFrameDecodedCallback& frame_decoded_callback);
VideoCodec codec_;
scoped_ptr<Vp8Decoder> vp8_decoder_;
- scoped_refptr<CastThread> cast_thread_;
DISALLOW_COPY_AND_ASSIGN(VideoDecoder);
};
diff --git a/chromium/media/cast/video_receiver/video_decoder_unittest.cc b/chromium/media/cast/video_receiver/video_decoder_unittest.cc
index 0b95d128b75..6405d1d7bee 100644
--- a/chromium/media/cast/video_receiver/video_decoder_unittest.cc
+++ b/chromium/media/cast/video_receiver/video_decoder_unittest.cc
@@ -3,11 +3,13 @@
// found in the LICENSE file.
#include "base/bind.h"
-#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
#include "base/test/simple_test_tick_clock.h"
+#include "base/time/tick_clock.h"
+#include "media/cast/cast_config.h"
#include "media/cast/cast_defines.h"
-#include "media/cast/cast_thread.h"
+#include "media/cast/cast_environment.h"
+#include "media/cast/cast_receiver.h"
#include "media/cast/test/fake_task_runner.h"
#include "media/cast/video_receiver/video_decoder.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -19,76 +21,74 @@ using testing::_;
// Random frame size for testing.
const int kFrameSize = 2345;
+static const int64 kStartMillisecond = GG_INT64_C(1245);
-static void ReleaseFrame(const EncodedVideoFrame* encoded_frame) {
- // Empty since we in this test send in the same frame.
-}
-
-class TestVideoDecoderCallback :
- public base::RefCountedThreadSafe<TestVideoDecoderCallback> {
+namespace {
+class DecodeTestFrameCallback :
+ public base::RefCountedThreadSafe<DecodeTestFrameCallback> {
public:
- TestVideoDecoderCallback()
- : num_called_(0) {}
- // TODO(mikhal): Set and check expectations.
- void DecodeComplete(scoped_ptr<I420VideoFrame> frame,
- const base::TimeTicks render_time) {
- num_called_++;
- }
+ DecodeTestFrameCallback() {}
- int number_times_called() {return num_called_;}
+ void DecodeComplete(const scoped_refptr<media::VideoFrame>& decoded_frame,
+ const base::TimeTicks& render_time) {}
+ protected:
+ virtual ~DecodeTestFrameCallback() {}
private:
- int num_called_;
+ friend class base::RefCountedThreadSafe<DecodeTestFrameCallback>;
};
+} // namespace
class VideoDecoderTest : public ::testing::Test {
protected:
- VideoDecoderTest() {
+ VideoDecoderTest()
+ : task_runner_(new test::FakeTaskRunner(&testing_clock_)),
+ cast_environment_(new CastEnvironment(&testing_clock_, task_runner_,
+ task_runner_, task_runner_, task_runner_, task_runner_,
+ GetDefaultCastLoggingConfig())),
+ test_callback_(new DecodeTestFrameCallback()) {
// Configure to vp8.
config_.codec = kVp8;
config_.use_external_decoder = false;
- video_decoder_callback_ = new TestVideoDecoderCallback();
+ decoder_.reset(new VideoDecoder(config_, cast_environment_));
+ testing_clock_.Advance(
+ base::TimeDelta::FromMilliseconds(kStartMillisecond));
}
- ~VideoDecoderTest() {}
- virtual void SetUp() {
- task_runner_ = new test::FakeTaskRunner(&testing_clock_);
- cast_thread_ = new CastThread(task_runner_, task_runner_, task_runner_,
- task_runner_, task_runner_);
- decoder_ = new VideoDecoder(cast_thread_, config_);
- }
+ virtual ~VideoDecoderTest() {}
- scoped_refptr<VideoDecoder> decoder_;
+ scoped_ptr<VideoDecoder> decoder_;
VideoReceiverConfig config_;
- EncodedVideoFrame encoded_frame_;
base::SimpleTestTickClock testing_clock_;
scoped_refptr<test::FakeTaskRunner> task_runner_;
- scoped_refptr<CastThread> cast_thread_;
- scoped_refptr<TestVideoDecoderCallback> video_decoder_callback_;
+ scoped_refptr<CastEnvironment> cast_environment_;
+ scoped_refptr<DecodeTestFrameCallback> test_callback_;
};
-// TODO(pwestin): Test decoding a real frame.
-TEST_F(VideoDecoderTest, SizeZero) {
- encoded_frame_.codec = kVp8;
+// TODO(pwestin): EXPECT_DEATH tests can not pass valgrind.
+TEST_F(VideoDecoderTest, DISABLED_SizeZero) {
+ EncodedVideoFrame encoded_frame;
base::TimeTicks render_time;
- VideoFrameDecodedCallback frame_decoded_callback =
- base::Bind(&TestVideoDecoderCallback::DecodeComplete,
- video_decoder_callback_.get());
- decoder_->DecodeVideoFrame(&encoded_frame_, render_time,
- frame_decoded_callback, base::Bind(ReleaseFrame, &encoded_frame_));
- EXPECT_EQ(0, video_decoder_callback_->number_times_called());
+ encoded_frame.codec = kVp8;
+ EXPECT_DEATH(
+ decoder_->DecodeVideoFrame(
+ &encoded_frame, render_time,
+ base::Bind(&DecodeTestFrameCallback::DecodeComplete, test_callback_)),
+ "Empty frame");
}
-TEST_F(VideoDecoderTest, InvalidCodec) {
+// TODO(pwestin): EXPECT_DEATH tests can not pass valgrind.
+TEST_F(VideoDecoderTest, DISABLED_InvalidCodec) {
+ EncodedVideoFrame encoded_frame;
base::TimeTicks render_time;
- VideoFrameDecodedCallback frame_decoded_callback =
- base::Bind(&TestVideoDecoderCallback::DecodeComplete,
- video_decoder_callback_.get());
- encoded_frame_.data.assign(kFrameSize, 0);
- encoded_frame_.codec = kExternalVideo;
- EXPECT_DEATH(decoder_->DecodeVideoFrame(&encoded_frame_, render_time,
- frame_decoded_callback, base::Bind(ReleaseFrame, &encoded_frame_)),
- "Invalid codec");
+ encoded_frame.data.assign(kFrameSize, 0);
+ encoded_frame.codec = kExternalVideo;
+ EXPECT_DEATH(
+ decoder_->DecodeVideoFrame(&encoded_frame, render_time, base::Bind(
+ &DecodeTestFrameCallback::DecodeComplete, test_callback_)),
+ "Invalid codec");
}
+// TODO(pwestin): Test decoding a real frame.
+
} // namespace cast
} // namespace media
diff --git a/chromium/media/cast/video_receiver/video_receiver.cc b/chromium/media/cast/video_receiver/video_receiver.cc
index 4d0421cc6c0..98bed1fc699 100644
--- a/chromium/media/cast/video_receiver/video_receiver.cc
+++ b/chromium/media/cast/video_receiver/video_receiver.cc
@@ -5,9 +5,12 @@
#include "media/cast/video_receiver/video_receiver.h"
#include <algorithm>
+
#include "base/bind.h"
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
+#include "crypto/encryptor.h"
+#include "crypto/symmetric_key.h"
#include "media/cast/cast_defines.h"
#include "media/cast/framer/framer.h"
#include "media/cast/video_receiver/video_decoder.h"
@@ -16,53 +19,29 @@ namespace media {
namespace cast {
const int64 kMinSchedulingDelayMs = 1;
-static const int64 kMaxFrameWaitMs = 20;
-static const int64 kMinTimeBetweenOffsetUpdatesMs = 500;
+
+static const int64 kMinTimeBetweenOffsetUpdatesMs = 2000;
static const int kTimeOffsetFilter = 8;
+static const int64_t kMinProcessIntervalMs = 5;
// Local implementation of RtpData (defined in rtp_rtcp_defines.h).
// Used to pass payload data into the video receiver.
class LocalRtpVideoData : public RtpData {
public:
explicit LocalRtpVideoData(VideoReceiver* video_receiver)
- : video_receiver_(video_receiver),
- time_updated_(false),
- incoming_rtp_timestamp_(0) {
- }
- ~LocalRtpVideoData() {}
+ : video_receiver_(video_receiver) {}
+
+ virtual ~LocalRtpVideoData() {}
virtual void OnReceivedPayloadData(const uint8* payload_data,
- int payload_size,
+ size_t payload_size,
const RtpCastHeader* rtp_header) OVERRIDE {
- {
- if (!time_updated_) {
- incoming_rtp_timestamp_ = rtp_header->webrtc.header.timestamp;
- time_incoming_packet_ = video_receiver_->clock_->NowTicks();
- time_updated_ = true;
- } else if (video_receiver_->clock_->NowTicks() > time_incoming_packet_ +
- base::TimeDelta::FromMilliseconds(kMinTimeBetweenOffsetUpdatesMs)) {
- incoming_rtp_timestamp_ = rtp_header->webrtc.header.timestamp;
- time_incoming_packet_ = video_receiver_->clock_->NowTicks();
- time_updated_ = true;
- }
- }
- video_receiver_->IncomingRtpPacket(payload_data, payload_size, *rtp_header);
- }
-
- bool GetPacketTimeInformation(base::TimeTicks* time_incoming_packet,
- uint32* incoming_rtp_timestamp) {
- *time_incoming_packet = time_incoming_packet_;
- *incoming_rtp_timestamp = incoming_rtp_timestamp_;
- bool time_updated = time_updated_;
- time_updated_ = false;
- return time_updated;
+ video_receiver_->IncomingParsedRtpPacket(payload_data, payload_size,
+ *rtp_header);
}
private:
VideoReceiver* video_receiver_;
- bool time_updated_;
- base::TimeTicks time_incoming_packet_;
- uint32 incoming_rtp_timestamp_;
};
// Local implementation of RtpPayloadFeedback (defined in rtp_defines.h)
@@ -73,14 +52,11 @@ class LocalRtpVideoFeedback : public RtpPayloadFeedback {
explicit LocalRtpVideoFeedback(VideoReceiver* video_receiver)
: video_receiver_(video_receiver) {
}
+
virtual void CastFeedback(const RtcpCastMessage& cast_message) OVERRIDE {
video_receiver_->CastFeedback(cast_message);
}
- virtual void RequestKeyFrame() OVERRIDE {
- video_receiver_->RequestKeyFrame();
- }
-
private:
VideoReceiver* video_receiver_;
};
@@ -107,156 +83,283 @@ class LocalRtpReceiverStatistics : public RtpReceiverStatistics {
RtpReceiver* rtp_receiver_;
};
-
-VideoReceiver::VideoReceiver(scoped_refptr<CastThread> cast_thread,
+VideoReceiver::VideoReceiver(scoped_refptr<CastEnvironment> cast_environment,
const VideoReceiverConfig& video_config,
PacedPacketSender* const packet_sender)
- : cast_thread_(cast_thread),
+ : cast_environment_(cast_environment),
codec_(video_config.codec),
- incoming_ssrc_(video_config.incoming_ssrc),
- default_tick_clock_(new base::DefaultTickClock()),
- clock_(default_tick_clock_.get()),
+ target_delay_delta_(
+ base::TimeDelta::FromMilliseconds(video_config.rtp_max_delay_ms)),
+ frame_delay_(base::TimeDelta::FromMilliseconds(
+ 1000 / video_config.max_frame_rate)),
incoming_payload_callback_(new LocalRtpVideoData(this)),
incoming_payload_feedback_(new LocalRtpVideoFeedback(this)),
- rtp_receiver_(NULL, &video_config, incoming_payload_callback_.get()),
+ rtp_receiver_(cast_environment_->Clock(), NULL, &video_config,
+ incoming_payload_callback_.get()),
rtp_video_receiver_statistics_(
new LocalRtpReceiverStatistics(&rtp_receiver_)),
+ time_incoming_packet_updated_(false),
+ incoming_rtp_timestamp_(0),
weak_factory_(this) {
- target_delay_delta_ = base::TimeDelta::FromMilliseconds(
- video_config.rtp_max_delay_ms);
int max_unacked_frames = video_config.rtp_max_delay_ms *
video_config.max_frame_rate / 1000;
DCHECK(max_unacked_frames) << "Invalid argument";
- framer_.reset(new Framer(incoming_payload_feedback_.get(),
+ if (video_config.aes_iv_mask.size() == kAesKeySize &&
+ video_config.aes_key.size() == kAesKeySize) {
+ iv_mask_ = video_config.aes_iv_mask;
+ crypto::SymmetricKey* key = crypto::SymmetricKey::Import(
+ crypto::SymmetricKey::AES, video_config.aes_key);
+ decryptor_.reset(new crypto::Encryptor());
+ decryptor_->Init(key, crypto::Encryptor::CTR, std::string());
+ } else if (video_config.aes_iv_mask.size() != 0 ||
+ video_config.aes_key.size() != 0) {
+ DCHECK(false) << "Invalid crypto configuration";
+ }
+
+ framer_.reset(new Framer(cast_environment->Clock(),
+ incoming_payload_feedback_.get(),
video_config.incoming_ssrc,
video_config.decoder_faster_than_max_frame_rate,
max_unacked_frames));
if (!video_config.use_external_decoder) {
- video_decoder_ = new VideoDecoder(cast_thread_, video_config);
+ video_decoder_.reset(new VideoDecoder(video_config, cast_environment));
}
- rtcp_.reset(new Rtcp(NULL,
- packet_sender,
- NULL,
- rtp_video_receiver_statistics_.get(),
- video_config.rtcp_mode,
- base::TimeDelta::FromMilliseconds(video_config.rtcp_interval),
- false,
- video_config.feedback_ssrc,
- video_config.rtcp_c_name));
-
- rtcp_->SetRemoteSSRC(video_config.incoming_ssrc);
- ScheduleNextRtcpReport();
- ScheduleNextCastMessage();
+ rtcp_.reset(
+ new Rtcp(cast_environment_,
+ NULL,
+ packet_sender,
+ NULL,
+ rtp_video_receiver_statistics_.get(),
+ video_config.rtcp_mode,
+ base::TimeDelta::FromMilliseconds(video_config.rtcp_interval),
+ video_config.feedback_ssrc,
+ video_config.incoming_ssrc,
+ video_config.rtcp_c_name));
}
VideoReceiver::~VideoReceiver() {}
+void VideoReceiver::InitializeTimers() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ ScheduleNextRtcpReport();
+ ScheduleNextCastMessage();
+}
+
void VideoReceiver::GetRawVideoFrame(
const VideoFrameDecodedCallback& callback) {
- DCHECK(video_decoder_);
- scoped_ptr<EncodedVideoFrame> encoded_frame(new EncodedVideoFrame());
- base::TimeTicks render_time;
- if (GetEncodedVideoFrame(encoded_frame.get(), &render_time)) {
- base::Closure frame_release_callback =
- base::Bind(&VideoReceiver::ReleaseFrame,
- weak_factory_.GetWeakPtr(), encoded_frame->frame_id);
- // Hand the ownership of the encoded frame to the decode thread.
- cast_thread_->PostTask(CastThread::VIDEO_DECODER, FROM_HERE,
- base::Bind(&VideoReceiver::DecodeVideoFrameThread,
- weak_factory_.GetWeakPtr(), encoded_frame.release(),
- render_time, callback, frame_release_callback));
- }
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ GetEncodedVideoFrame(base::Bind(&VideoReceiver::DecodeVideoFrame,
+ base::Unretained(this), callback));
+}
+
+// Called when we have a frame to decode.
+void VideoReceiver::DecodeVideoFrame(
+ const VideoFrameDecodedCallback& callback,
+ scoped_ptr<EncodedVideoFrame> encoded_frame,
+ const base::TimeTicks& render_time) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ // Hand the ownership of the encoded frame to the decode thread.
+ cast_environment_->PostTask(CastEnvironment::VIDEO_DECODER, FROM_HERE,
+ base::Bind(&VideoReceiver::DecodeVideoFrameThread, base::Unretained(this),
+ base::Passed(&encoded_frame), render_time, callback));
}
// Utility function to run the decoder on a designated decoding thread.
void VideoReceiver::DecodeVideoFrameThread(
- const EncodedVideoFrame* encoded_frame,
+ scoped_ptr<EncodedVideoFrame> encoded_frame,
const base::TimeTicks render_time,
- const VideoFrameDecodedCallback& frame_decoded_callback,
- base::Closure frame_release_callback) {
- video_decoder_->DecodeVideoFrame(encoded_frame, render_time,
- frame_decoded_callback, frame_release_callback);
- // Release memory.
- delete encoded_frame;
+ const VideoFrameDecodedCallback& frame_decoded_callback) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::VIDEO_DECODER));
+ DCHECK(video_decoder_);
+
+ if (!(video_decoder_->DecodeVideoFrame(encoded_frame.get(), render_time,
+ frame_decoded_callback))) {
+ // This will happen if we decide to decode but not show a frame.
+ cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
+ base::Bind(&VideoReceiver::GetRawVideoFrame, base::Unretained(this),
+ frame_decoded_callback));
+ }
}
-bool VideoReceiver::GetEncodedVideoFrame(EncodedVideoFrame* encoded_frame,
- base::TimeTicks* render_time) {
- DCHECK(encoded_frame);
- DCHECK(render_time);
+bool VideoReceiver::DecryptVideoFrame(
+ scoped_ptr<EncodedVideoFrame>* video_frame) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ DCHECK(decryptor_) << "Invalid state";
+ if (!decryptor_->SetCounter(GetAesNonce((*video_frame)->frame_id,
+ iv_mask_))) {
+ NOTREACHED() << "Failed to set counter";
+ return false;
+ }
+ std::string decrypted_video_data;
+ if (!decryptor_->Decrypt((*video_frame)->data, &decrypted_video_data)) {
+ VLOG(1) << "Decryption error";
+ // Give up on this frame, release it from jitter buffer.
+ framer_->ReleaseFrame((*video_frame)->frame_id);
+ return false;
+ }
+ (*video_frame)->data.swap(decrypted_video_data);
+ return true;
+}
+
+// Called from the main cast thread.
+void VideoReceiver::GetEncodedVideoFrame(
+ const VideoFrameEncodedCallback& callback) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ scoped_ptr<EncodedVideoFrame> encoded_frame(new EncodedVideoFrame());
uint32 rtp_timestamp = 0;
bool next_frame = false;
- base::TimeTicks timeout = clock_->NowTicks() +
- base::TimeDelta::FromMilliseconds(kMaxFrameWaitMs);
- if (!framer_->GetEncodedVideoFrame(timeout,
- encoded_frame,
- &rtp_timestamp,
+ if (!framer_->GetEncodedVideoFrame(encoded_frame.get(), &rtp_timestamp,
&next_frame)) {
- return false;
+ // We have no video frames. Wait for new packet(s).
+ queued_encoded_callbacks_.push_back(callback);
+ return;
}
- base::TimeTicks now = clock_->NowTicks();
+
+ if (decryptor_ && !DecryptVideoFrame(&encoded_frame)) {
+ // Logging already done.
+ queued_encoded_callbacks_.push_back(callback);
+ return;
+ }
+
+ base::TimeTicks render_time;
+ if (PullEncodedVideoFrame(rtp_timestamp, next_frame, &encoded_frame,
+ &render_time)) {
+ cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
+ base::Bind(callback, base::Passed(&encoded_frame), render_time));
+ } else {
+ // We have a video frame; however we are missing packets and we have time
+ // to wait for new packet(s).
+ queued_encoded_callbacks_.push_back(callback);
+ }
+}
+
+// Should we pull the encoded video frame from the framer? decided by if this is
+// the next frame or we are running out of time and have to pull the following
+// frame.
+// If the frame is too old to be rendered we set the don't show flag in the
+// video bitstream where possible.
+bool VideoReceiver::PullEncodedVideoFrame(uint32 rtp_timestamp,
+ bool next_frame, scoped_ptr<EncodedVideoFrame>* encoded_frame,
+ base::TimeTicks* render_time) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ base::TimeTicks now = cast_environment_->Clock()->NowTicks();
*render_time = GetRenderTime(now, rtp_timestamp);
- base::TimeDelta max_frame_wait_delta =
- base::TimeDelta::FromMilliseconds(kMaxFrameWaitMs);
+ // TODO(mikhal): Store actual render time and not diff.
+ cast_environment_->Logging()->InsertFrameEventWithDelay(kVideoRenderDelay,
+ rtp_timestamp, (*encoded_frame)->frame_id, now - *render_time);
+
+ // Minimum time before a frame is due to be rendered before we pull it for
+ // decode.
+ base::TimeDelta min_wait_delta = frame_delay_;
base::TimeDelta time_until_render = *render_time - now;
- base::TimeDelta time_until_release = time_until_render - max_frame_wait_delta;
- base::TimeDelta zero_delta = base::TimeDelta::FromMilliseconds(0);
- if (!next_frame && (time_until_release > zero_delta)) {
- // TODO(mikhal): If returning false, then the application should sleep, or
- // else which may spin here. Alternatively, we could sleep here, which will
- // be posting a delayed task to ourselves, but then can end up in getting
- // stuck as well.
+ if (!next_frame && (time_until_render > min_wait_delta)) {
+ // Example:
+ // We have decoded frame 1 and we have received the complete frame 3, but
+ // not frame 2. If we still have time before frame 3 should be rendered we
+ // will wait for 2 to arrive, however if 2 never show up this timer will hit
+ // and we will pull out frame 3 for decoding and rendering.
+ base::TimeDelta time_until_release = time_until_render - min_wait_delta;
+ cast_environment_->PostDelayedTask(CastEnvironment::MAIN, FROM_HERE,
+ base::Bind(&VideoReceiver::PlayoutTimeout, weak_factory_.GetWeakPtr()),
+ time_until_release);
+ VLOG(1) << "Wait before releasing frame "
+ << static_cast<int>((*encoded_frame)->frame_id)
+ << " time " << time_until_release.InMilliseconds();
return false;
}
- base::TimeDelta dont_show_timeout_delta = time_until_render -
+ base::TimeDelta dont_show_timeout_delta =
base::TimeDelta::FromMilliseconds(-kDontShowTimeoutMs);
if (codec_ == kVp8 && time_until_render < dont_show_timeout_delta) {
- encoded_frame->data[0] &= 0xef;
- VLOG(1) << "Don't show frame";
+ (*encoded_frame)->data[0] &= 0xef;
+ VLOG(1) << "Don't show frame "
+ << static_cast<int>((*encoded_frame)->frame_id)
+ << " time_until_render:" << time_until_render.InMilliseconds();
+ } else {
+ VLOG(1) << "Show frame "
+ << static_cast<int>((*encoded_frame)->frame_id)
+ << " time_until_render:" << time_until_render.InMilliseconds();
}
-
- encoded_frame->codec = codec_;
+ // We have a copy of the frame, release this one.
+ framer_->ReleaseFrame((*encoded_frame)->frame_id);
+ (*encoded_frame)->codec = codec_;
return true;
}
+void VideoReceiver::PlayoutTimeout() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ if (queued_encoded_callbacks_.empty()) return;
+
+ uint32 rtp_timestamp = 0;
+ bool next_frame = false;
+ scoped_ptr<EncodedVideoFrame> encoded_frame(new EncodedVideoFrame());
+
+ if (!framer_->GetEncodedVideoFrame(encoded_frame.get(), &rtp_timestamp,
+ &next_frame)) {
+ // We have no video frames. Wait for new packet(s).
+ // Since the application can post multiple VideoFrameEncodedCallback and
+ // we only check the next frame to play out we might have multiple timeout
+ // events firing after each other; however this should be a rare event.
+ VLOG(1) << "Failed to retrieved a complete frame at this point in time";
+ return;
+ }
+ VLOG(1) << "PlayoutTimeout retrieved frame "
+ << static_cast<int>(encoded_frame->frame_id);
+
+ if (decryptor_ && !DecryptVideoFrame(&encoded_frame)) {
+ // Logging already done.
+ return;
+ }
+
+ base::TimeTicks render_time;
+ if (PullEncodedVideoFrame(rtp_timestamp, next_frame, &encoded_frame,
+ &render_time)) {
+ if (!queued_encoded_callbacks_.empty()) {
+ VideoFrameEncodedCallback callback = queued_encoded_callbacks_.front();
+ queued_encoded_callbacks_.pop_front();
+ cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
+ base::Bind(callback, base::Passed(&encoded_frame), render_time));
+ }
+ } else {
+ // We have a video frame; however we are missing packets and we have time
+ // to wait for new packet(s).
+ }
+}
+
base::TimeTicks VideoReceiver::GetRenderTime(base::TimeTicks now,
uint32 rtp_timestamp) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
// Senders time in ms when this frame was captured.
// Note: the senders clock and our local clock might not be synced.
base::TimeTicks rtp_timestamp_in_ticks;
- base::TimeTicks time_incoming_packet;
- uint32 incoming_rtp_timestamp;
-
- if (time_offset_.InMilliseconds()) { // was == 0
- incoming_payload_callback_->GetPacketTimeInformation(
- &time_incoming_packet, &incoming_rtp_timestamp);
+ if (time_offset_.InMilliseconds() == 0) {
if (!rtcp_->RtpTimestampInSenderTime(kVideoFrequency,
- incoming_rtp_timestamp,
+ incoming_rtp_timestamp_,
&rtp_timestamp_in_ticks)) {
// We have not received any RTCP to sync the stream play it out as soon as
// possible.
return now;
}
- time_offset_ = time_incoming_packet - rtp_timestamp_in_ticks;
- } else if (incoming_payload_callback_->GetPacketTimeInformation(
- &time_incoming_packet, &incoming_rtp_timestamp)) {
+ time_offset_ = time_incoming_packet_ - rtp_timestamp_in_ticks;
+ } else if (time_incoming_packet_updated_) {
if (rtcp_->RtpTimestampInSenderTime(kVideoFrequency,
- incoming_rtp_timestamp,
+ incoming_rtp_timestamp_,
&rtp_timestamp_in_ticks)) {
// Time to update the time_offset.
base::TimeDelta time_offset =
- time_incoming_packet - rtp_timestamp_in_ticks;
+ time_incoming_packet_ - rtp_timestamp_in_ticks;
time_offset_ = ((kTimeOffsetFilter - 1) * time_offset_ + time_offset)
/ kTimeOffsetFilter;
}
}
+ // Reset |time_incoming_packet_updated_| to enable a future measurement.
+ time_incoming_packet_updated_ = false;
if (!rtcp_->RtpTimestampInSenderTime(kVideoFrequency,
rtp_timestamp,
&rtp_timestamp_in_ticks)) {
@@ -266,70 +369,95 @@ base::TimeTicks VideoReceiver::GetRenderTime(base::TimeTicks now,
return (rtp_timestamp_in_ticks + time_offset_ + target_delay_delta_);
}
-void VideoReceiver::IncomingPacket(const uint8* packet, int length) {
+void VideoReceiver::IncomingPacket(const uint8* packet, size_t length,
+ const base::Closure callback) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
if (Rtcp::IsRtcpPacket(packet, length)) {
rtcp_->IncomingRtcpPacket(packet, length);
- return;
+ } else {
+ rtp_receiver_.ReceivedPacket(packet, length);
}
- rtp_receiver_.ReceivedPacket(packet, length);
+ cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE, callback);
}
-void VideoReceiver::IncomingRtpPacket(const uint8* payload_data,
- int payload_size,
- const RtpCastHeader& rtp_header) {
- framer_->InsertPacket(payload_data, payload_size, rtp_header);
+void VideoReceiver::IncomingParsedRtpPacket(const uint8* payload_data,
+ size_t payload_size,
+ const RtpCastHeader& rtp_header) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+
+ base::TimeTicks now = cast_environment_->Clock()->NowTicks();
+ if (time_incoming_packet_.is_null() || now - time_incoming_packet_ >
+ base::TimeDelta::FromMilliseconds(kMinTimeBetweenOffsetUpdatesMs)) {
+ if (time_incoming_packet_.is_null()) InitializeTimers();
+ incoming_rtp_timestamp_ = rtp_header.webrtc.header.timestamp;
+ time_incoming_packet_ = now;
+ time_incoming_packet_updated_ = true;
+ }
+
+ cast_environment_->Logging()->InsertPacketEvent(kPacketReceived,
+ rtp_header.webrtc.header.timestamp, rtp_header.frame_id,
+ rtp_header.packet_id, rtp_header.max_packet_id, payload_size);
+
+ bool complete = framer_->InsertPacket(payload_data, payload_size, rtp_header);
+
+ if (!complete) return; // Video frame not complete; wait for more packets.
+ if (queued_encoded_callbacks_.empty()) return; // No pending callback.
+
+ VideoFrameEncodedCallback callback = queued_encoded_callbacks_.front();
+ queued_encoded_callbacks_.pop_front();
+ cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
+ base::Bind(&VideoReceiver::GetEncodedVideoFrame,
+ weak_factory_.GetWeakPtr(), callback));
}
// Send a cast feedback message. Actual message created in the framer (cast
// message builder).
void VideoReceiver::CastFeedback(const RtcpCastMessage& cast_message) {
- rtcp_->SendRtcpCast(cast_message);
- time_last_sent_cast_message_= clock_->NowTicks();
-}
-
-void VideoReceiver::ReleaseFrame(uint8 frame_id) {
- framer_->ReleaseFrame(frame_id);
-}
-
-// Send a key frame request to the sender.
-void VideoReceiver::RequestKeyFrame() {
- rtcp_->SendRtcpPli(incoming_ssrc_);
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ // TODO(pwestin): wire up log messages.
+ rtcp_->SendRtcpFromRtpReceiver(&cast_message, NULL);
+ time_last_sent_cast_message_= cast_environment_->Clock()->NowTicks();
}
// Cast messages should be sent within a maximum interval. Schedule a call
// if not triggered elsewhere, e.g. by the cast message_builder.
void VideoReceiver::ScheduleNextCastMessage() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
base::TimeTicks send_time;
framer_->TimeToSendNextCastMessage(&send_time);
- base::TimeDelta time_to_send = send_time - clock_->NowTicks();
+ base::TimeDelta time_to_send = send_time -
+ cast_environment_->Clock()->NowTicks();
time_to_send = std::max(time_to_send,
base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs));
- cast_thread_->PostDelayedTask(CastThread::MAIN, FROM_HERE,
+ cast_environment_->PostDelayedTask(CastEnvironment::MAIN, FROM_HERE,
base::Bind(&VideoReceiver::SendNextCastMessage,
weak_factory_.GetWeakPtr()), time_to_send);
}
void VideoReceiver::SendNextCastMessage() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
framer_->SendCastMessage(); // Will only send a message if it is time.
ScheduleNextCastMessage();
}
// Schedule the next RTCP report to be sent back to the sender.
void VideoReceiver::ScheduleNextRtcpReport() {
- base::TimeDelta time_to_next =
- rtcp_->TimeToSendNextRtcpReport() - clock_->NowTicks();
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ base::TimeDelta time_to_next = rtcp_->TimeToSendNextRtcpReport() -
+ cast_environment_->Clock()->NowTicks();
time_to_next = std::max(time_to_next,
base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs));
- cast_thread_->PostDelayedTask(CastThread::MAIN, FROM_HERE,
+ cast_environment_->PostDelayedTask(CastEnvironment::MAIN, FROM_HERE,
base::Bind(&VideoReceiver::SendNextRtcpReport,
weak_factory_.GetWeakPtr()), time_to_next);
}
void VideoReceiver::SendNextRtcpReport() {
- rtcp_->SendRtcpReport(incoming_ssrc_);
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ rtcp_->SendRtcpFromRtpReceiver(NULL, NULL);
ScheduleNextRtcpReport();
}
diff --git a/chromium/media/cast/video_receiver/video_receiver.gypi b/chromium/media/cast/video_receiver/video_receiver.gypi
index bbee92e5ca2..e1a9902872e 100644
--- a/chromium/media/cast/video_receiver/video_receiver.gypi
+++ b/chromium/media/cast/video_receiver/video_receiver.gypi
@@ -19,6 +19,7 @@
'video_receiver.cc',
], # source
'dependencies': [
+ '<(DEPTH)/crypto/crypto.gyp:crypto',
'framer/framer.gyp:cast_framer',
'video_receiver/codecs/vp8/vp8_decoder.gyp:cast_vp8_decoder',
'rtp_receiver/rtp_receiver.gyp:cast_rtp_receiver',
diff --git a/chromium/media/cast/video_receiver/video_receiver.h b/chromium/media/cast/video_receiver/video_receiver.h
index 40d0b0320a5..fbc3653a514 100644
--- a/chromium/media/cast/video_receiver/video_receiver.h
+++ b/chromium/media/cast/video_receiver/video_receiver.h
@@ -11,15 +11,18 @@
#include "base/memory/scoped_ptr.h"
#include "base/memory/weak_ptr.h"
#include "base/threading/non_thread_safe.h"
-#include "base/time/default_tick_clock.h"
#include "base/time/tick_clock.h"
#include "base/time/time.h"
#include "media/cast/cast_config.h"
+#include "media/cast/cast_environment.h"
#include "media/cast/cast_receiver.h"
-#include "media/cast/cast_thread.h"
#include "media/cast/rtcp/rtcp.h"
-#include "media/cast/rtp_common/rtp_defines.h"
#include "media/cast/rtp_receiver/rtp_receiver.h"
+#include "media/cast/rtp_receiver/rtp_receiver_defines.h"
+
+namespace crypto {
+ class Encryptor;
+}
namespace media {
namespace cast {
@@ -33,13 +36,11 @@ class Rtcp;
class RtpReceiverStatistics;
class VideoDecoder;
-
// Should only be called from the Main cast thread.
class VideoReceiver : public base::NonThreadSafe,
public base::SupportsWeakPtr<VideoReceiver> {
public:
-
- VideoReceiver(scoped_refptr<CastThread> cast_thread,
+ VideoReceiver(scoped_refptr<CastEnvironment> cast_environment,
const VideoReceiverConfig& video_config,
PacedPacketSender* const packet_sender);
@@ -48,69 +49,78 @@ class VideoReceiver : public base::NonThreadSafe,
// Request a raw frame. Will return frame via callback when available.
void GetRawVideoFrame(const VideoFrameDecodedCallback& callback);
- // Request an encoded frame. Memory allocated by application.
- bool GetEncodedVideoFrame(EncodedVideoFrame* video_frame,
- base::TimeTicks* render_time);
+ // Request an encoded frame. Will return frame via callback when available.
+ void GetEncodedVideoFrame(const VideoFrameEncodedCallback& callback);
// Insert a RTP packet to the video receiver.
- void IncomingPacket(const uint8* packet, int length);
-
- // Release frame - should be called following a GetEncodedVideoFrame call.
- // Removes frame from the frame map in the framer.
- void ReleaseFrame(uint8 frame_id);
+ void IncomingPacket(const uint8* packet, size_t length,
+ const base::Closure callback);
- void set_clock(base::TickClock* clock) {
- clock_ = clock;
- rtcp_->set_clock(clock);
- }
protected:
- void IncomingRtpPacket(const uint8* payload_data,
- int payload_size,
- const RtpCastHeader& rtp_header);
+ void IncomingParsedRtpPacket(const uint8* payload_data,
+ size_t payload_size,
+ const RtpCastHeader& rtp_header);
void DecodeVideoFrameThread(
- const EncodedVideoFrame* encoded_frame,
+ scoped_ptr<EncodedVideoFrame> encoded_frame,
const base::TimeTicks render_time,
- const VideoFrameDecodedCallback& frame_decoded_callback,
- base::Closure frame_release_callback);
+ const VideoFrameDecodedCallback& frame_decoded_callback);
private:
friend class LocalRtpVideoData;
friend class LocalRtpVideoFeedback;
void CastFeedback(const RtcpCastMessage& cast_message);
- void RequestKeyFrame();
+
+ void DecodeVideoFrame(const VideoFrameDecodedCallback& callback,
+ scoped_ptr<EncodedVideoFrame> encoded_frame,
+ const base::TimeTicks& render_time);
+
+ bool DecryptVideoFrame(scoped_ptr<EncodedVideoFrame>* video_frame);
+
+ bool PullEncodedVideoFrame(uint32 rtp_timestamp,
+ bool next_frame,
+ scoped_ptr<EncodedVideoFrame>* encoded_frame,
+ base::TimeTicks* render_time);
+
+ void PlayoutTimeout();
// Returns Render time based on current time and the rtp timestamp.
base::TimeTicks GetRenderTime(base::TimeTicks now, uint32 rtp_timestamp);
+ void InitializeTimers();
+
// Schedule timing for the next cast message.
void ScheduleNextCastMessage();
// Schedule timing for the next RTCP report.
void ScheduleNextRtcpReport();
+
// Actually send the next cast message.
void SendNextCastMessage();
+
// Actually send the next RTCP report.
void SendNextRtcpReport();
- scoped_refptr<VideoDecoder> video_decoder_;
- scoped_refptr<CastThread> cast_thread_;
+ scoped_ptr<VideoDecoder> video_decoder_;
+ scoped_refptr<CastEnvironment> cast_environment_;
scoped_ptr<Framer> framer_;
const VideoCodec codec_;
- const uint32 incoming_ssrc_;
base::TimeDelta target_delay_delta_;
+ base::TimeDelta frame_delay_;
scoped_ptr<LocalRtpVideoData> incoming_payload_callback_;
scoped_ptr<LocalRtpVideoFeedback> incoming_payload_feedback_;
RtpReceiver rtp_receiver_;
scoped_ptr<Rtcp> rtcp_;
scoped_ptr<RtpReceiverStatistics> rtp_video_receiver_statistics_;
base::TimeTicks time_last_sent_cast_message_;
- // Sender-receiver offset estimation.
- base::TimeDelta time_offset_;
-
- scoped_ptr<base::TickClock> default_tick_clock_;
- base::TickClock* clock_;
+ base::TimeDelta time_offset_; // Sender-receiver offset estimation.
+ scoped_ptr<crypto::Encryptor> decryptor_;
+ std::string iv_mask_;
+ std::list<VideoFrameEncodedCallback> queued_encoded_callbacks_;
+ bool time_incoming_packet_updated_;
+ base::TimeTicks time_incoming_packet_;
+ uint32 incoming_rtp_timestamp_;
base::WeakPtrFactory<VideoReceiver> weak_factory_;
@@ -121,4 +131,3 @@ class VideoReceiver : public base::NonThreadSafe,
} // namespace media
#endif // MEDIA_CAST_VIDEO_RECEIVER_VIDEO_RECEIVER_H_
-
diff --git a/chromium/media/cast/video_receiver/video_receiver_unittest.cc b/chromium/media/cast/video_receiver/video_receiver_unittest.cc
index b1b1c0b599f..8001ac430d6 100644
--- a/chromium/media/cast/video_receiver/video_receiver_unittest.cc
+++ b/chromium/media/cast/video_receiver/video_receiver_unittest.cc
@@ -7,44 +7,61 @@
#include "base/memory/scoped_ptr.h"
#include "base/test/simple_test_tick_clock.h"
#include "media/cast/cast_defines.h"
-#include "media/cast/cast_thread.h"
-#include "media/cast/pacing/mock_paced_packet_sender.h"
+#include "media/cast/cast_environment.h"
+#include "media/cast/net/pacing/mock_paced_packet_sender.h"
#include "media/cast/test/fake_task_runner.h"
#include "media/cast/video_receiver/video_receiver.h"
#include "testing/gmock/include/gmock/gmock.h"
static const int kPacketSize = 1500;
-static const int64 kStartMillisecond = 123456789;
+static const int64 kStartMillisecond = GG_INT64_C(12345678900000);
namespace media {
namespace cast {
using testing::_;
-// was thread counted thread safe.
+namespace {
+// Was thread counted thread safe.
class TestVideoReceiverCallback :
public base::RefCountedThreadSafe<TestVideoReceiverCallback> {
public:
TestVideoReceiverCallback()
- :num_called_(0) {}
+ : num_called_(0) {}
+
// TODO(mikhal): Set and check expectations.
- void DecodeComplete(scoped_ptr<I420VideoFrame> frame,
- const base::TimeTicks render_time) {
+ void DecodeComplete(const scoped_refptr<media::VideoFrame>& video_frame,
+ const base::TimeTicks& render_time) {
++num_called_;
}
- int number_times_called() { return num_called_;}
+
+ void FrameToDecode(scoped_ptr<EncodedVideoFrame> video_frame,
+ const base::TimeTicks& render_time) {
+ EXPECT_TRUE(video_frame->key_frame);
+ EXPECT_EQ(kVp8, video_frame->codec);
+ ++num_called_;
+ }
+
+ int number_times_called() const { return num_called_;}
+
+ protected:
+ virtual ~TestVideoReceiverCallback() {}
+
private:
+ friend class base::RefCountedThreadSafe<TestVideoReceiverCallback>;
+
int num_called_;
};
+} // namespace
class PeerVideoReceiver : public VideoReceiver {
public:
- PeerVideoReceiver(scoped_refptr<CastThread> cast_thread,
+ PeerVideoReceiver(scoped_refptr<CastEnvironment> cast_environment,
const VideoReceiverConfig& video_config,
PacedPacketSender* const packet_sender)
- : VideoReceiver(cast_thread, video_config, packet_sender) {
+ : VideoReceiver(cast_environment, video_config, packet_sender) {
}
- using VideoReceiver::IncomingRtpPacket;
+ using VideoReceiver::IncomingParsedRtpPacket;
};
@@ -55,17 +72,17 @@ class VideoReceiverTest : public ::testing::Test {
config_.codec = kVp8;
config_.use_external_decoder = false;
task_runner_ = new test::FakeTaskRunner(&testing_clock_);
- cast_thread_ = new CastThread(task_runner_, task_runner_, task_runner_,
- task_runner_, task_runner_);
+ cast_environment_ = new CastEnvironment(&testing_clock_, task_runner_,
+ task_runner_, task_runner_, task_runner_, task_runner_,
+ GetDefaultCastLoggingConfig());
receiver_.reset(new
- PeerVideoReceiver(cast_thread_, config_, &mock_transport_));
+ PeerVideoReceiver(cast_environment_, config_, &mock_transport_));
testing_clock_.Advance(
base::TimeDelta::FromMilliseconds(kStartMillisecond));
video_receiver_callback_ = new TestVideoReceiverCallback();
- receiver_->set_clock(&testing_clock_);
}
- ~VideoReceiverTest() {}
+ virtual ~VideoReceiverTest() {}
virtual void SetUp() {
payload_.assign(kPacketSize, 0);
@@ -87,51 +104,65 @@ class VideoReceiverTest : public ::testing::Test {
base::SimpleTestTickClock testing_clock_;
scoped_refptr<test::FakeTaskRunner> task_runner_;
- scoped_refptr<CastThread> cast_thread_;
+ scoped_refptr<CastEnvironment> cast_environment_;
scoped_refptr<TestVideoReceiverCallback> video_receiver_callback_;
};
TEST_F(VideoReceiverTest, GetOnePacketEncodedframe) {
EXPECT_CALL(mock_transport_, SendRtcpPacket(_)).WillRepeatedly(
testing::Return(true));
- receiver_->IncomingRtpPacket(payload_.data(), payload_.size(), rtp_header_);
- EncodedVideoFrame video_frame;
- base::TimeTicks render_time;
- EXPECT_TRUE(receiver_->GetEncodedVideoFrame(&video_frame, &render_time));
- EXPECT_TRUE(video_frame.key_frame);
- EXPECT_EQ(kVp8, video_frame.codec);
+ receiver_->IncomingParsedRtpPacket(payload_.data(), payload_.size(),
+ rtp_header_);
+
+ VideoFrameEncodedCallback frame_to_decode_callback =
+ base::Bind(&TestVideoReceiverCallback::FrameToDecode,
+ video_receiver_callback_);
+
+ receiver_->GetEncodedVideoFrame(frame_to_decode_callback);
task_runner_->RunTasks();
+ EXPECT_EQ(video_receiver_callback_->number_times_called(), 1);
}
TEST_F(VideoReceiverTest, MultiplePackets) {
EXPECT_CALL(mock_transport_, SendRtcpPacket(_)).WillRepeatedly(
testing::Return(true));
rtp_header_.max_packet_id = 2;
- receiver_->IncomingRtpPacket(payload_.data(), payload_.size(), rtp_header_);
+ receiver_->IncomingParsedRtpPacket(payload_.data(), payload_.size(),
+ rtp_header_);
++rtp_header_.packet_id;
++rtp_header_.webrtc.header.sequenceNumber;
- receiver_->IncomingRtpPacket(payload_.data(), payload_.size(), rtp_header_);
+ receiver_->IncomingParsedRtpPacket(payload_.data(), payload_.size(),
+ rtp_header_);
++rtp_header_.packet_id;
- receiver_->IncomingRtpPacket(payload_.data(), payload_.size(), rtp_header_);
- EncodedVideoFrame video_frame;
- base::TimeTicks render_time;
- EXPECT_TRUE(receiver_->GetEncodedVideoFrame(&video_frame, &render_time));
+ receiver_->IncomingParsedRtpPacket(payload_.data(), payload_.size(),
+ rtp_header_);
+
+ VideoFrameEncodedCallback frame_to_decode_callback =
+ base::Bind(&TestVideoReceiverCallback::FrameToDecode,
+ video_receiver_callback_);
+
+ receiver_->GetEncodedVideoFrame(frame_to_decode_callback);
+
task_runner_->RunTasks();
+ EXPECT_EQ(video_receiver_callback_->number_times_called(), 1);
}
-// TODO(pwestin): add encoded frames.
TEST_F(VideoReceiverTest, GetOnePacketRawframe) {
EXPECT_CALL(mock_transport_, SendRtcpPacket(_)).WillRepeatedly(
testing::Return(true));
- receiver_->IncomingRtpPacket(payload_.data(), payload_.size(), rtp_header_);
+ receiver_->IncomingParsedRtpPacket(payload_.data(), payload_.size(),
+ rtp_header_);
// Decode error - requires legal input.
VideoFrameDecodedCallback frame_decoded_callback =
base::Bind(&TestVideoReceiverCallback::DecodeComplete,
video_receiver_callback_);
receiver_->GetRawVideoFrame(frame_decoded_callback);
task_runner_->RunTasks();
+ EXPECT_EQ(video_receiver_callback_->number_times_called(), 0);
}
+// TODO(pwestin): add encoded frames.
+
} // namespace cast
} // namespace media
diff --git a/chromium/media/cast/video_sender/codecs/vp8/vp8_encoder.cc b/chromium/media/cast/video_sender/codecs/vp8/vp8_encoder.cc
index eaf6fbd714b..099be63a2c8 100644
--- a/chromium/media/cast/video_sender/codecs/vp8/vp8_encoder.cc
+++ b/chromium/media/cast/video_sender/codecs/vp8/vp8_encoder.cc
@@ -9,8 +9,8 @@
#include <vector>
#include "base/logging.h"
+#include "media/base/video_frame.h"
#include "media/cast/cast_defines.h"
-#include "media/cast/rtp_common/rtp_defines.h"
#include "third_party/libvpx/source/libvpx/vpx/vp8cx.h"
namespace media {
@@ -38,6 +38,12 @@ Vp8Encoder::Vp8Encoder(const VideoSenderConfig& video_config,
timestamp_(0),
last_encoded_frame_id_(kStartFrameId),
number_of_repeated_buffers_(0) {
+ // TODO(pwestin): we need to figure out how to synchronize the acking with the
+ // internal state of the encoder, ideally the encoder will tell if we can
+ // send another frame.
+ DCHECK(!use_multiple_video_buffers_ ||
+ max_number_of_repeated_buffers_in_a_row_ == 0) << "Invalid config";
+
// VP8 have 3 buffers available for prediction, with
// max_number_of_video_buffers_used set to 1 we maximize the coding efficiency
// however in this mode we can not skip frames in the receiver to catch up
@@ -115,17 +121,20 @@ void Vp8Encoder::InitEncode(int number_of_cores) {
rc_max_intra_target);
}
-bool Vp8Encoder::Encode(const I420VideoFrame& input_image,
+bool Vp8Encoder::Encode(const scoped_refptr<media::VideoFrame>& video_frame,
EncodedVideoFrame* encoded_image) {
// Image in vpx_image_t format.
// Input image is const. VP8's raw image is not defined as const.
- raw_image_->planes[PLANE_Y] = const_cast<uint8*>(input_image.y_plane.data);
- raw_image_->planes[PLANE_U] = const_cast<uint8*>(input_image.u_plane.data);
- raw_image_->planes[PLANE_V] = const_cast<uint8*>(input_image.v_plane.data);
+ raw_image_->planes[PLANE_Y] =
+ const_cast<uint8*>(video_frame->data(VideoFrame::kYPlane));
+ raw_image_->planes[PLANE_U] =
+ const_cast<uint8*>(video_frame->data(VideoFrame::kUPlane));
+ raw_image_->planes[PLANE_V] =
+ const_cast<uint8*>(video_frame->data(VideoFrame::kVPlane));
- raw_image_->stride[VPX_PLANE_Y] = input_image.y_plane.stride;
- raw_image_->stride[VPX_PLANE_U] = input_image.u_plane.stride;
- raw_image_->stride[VPX_PLANE_V] = input_image.v_plane.stride;
+ raw_image_->stride[VPX_PLANE_Y] = video_frame->stride(VideoFrame::kYPlane);
+ raw_image_->stride[VPX_PLANE_U] = video_frame->stride(VideoFrame::kUPlane);
+ raw_image_->stride[VPX_PLANE_V] = video_frame->stride(VideoFrame::kVPlane);
uint8 latest_frame_id_to_reference;
Vp8Buffers buffer_to_update;
@@ -158,7 +167,7 @@ bool Vp8Encoder::Encode(const I420VideoFrame& input_image,
// Get encoded frame.
const vpx_codec_cx_pkt_t *pkt = NULL;
vpx_codec_iter_t iter = NULL;
- int total_size = 0;
+ size_t total_size = 0;
while ((pkt = vpx_codec_get_cx_data(encoder_, &iter)) != NULL) {
if (pkt->kind == VPX_CODEC_CX_FRAME_PKT) {
total_size += pkt->data.frame.sz;
@@ -183,6 +192,9 @@ bool Vp8Encoder::Encode(const I420VideoFrame& input_image,
encoded_image->last_referenced_frame_id = latest_frame_id_to_reference;
encoded_image->frame_id = ++last_encoded_frame_id_;
+ VLOG(1) << "VP8 encoded frame:" << static_cast<int>(encoded_image->frame_id)
+ << " sized:" << total_size;
+
if (encoded_image->key_frame) {
key_frame_requested_ = false;
@@ -221,7 +233,7 @@ void Vp8Encoder::GetCodecReferenceFlags(vpx_codec_flags_t* flags) {
}
}
-uint8 Vp8Encoder::GetLatestFrameIdToReference() {
+uint32 Vp8Encoder::GetLatestFrameIdToReference() {
if (!use_multiple_video_buffers_) return last_encoded_frame_id_;
int latest_frame_id_to_reference = -1;
@@ -249,7 +261,7 @@ uint8 Vp8Encoder::GetLatestFrameIdToReference() {
}
}
DCHECK(latest_frame_id_to_reference != -1) << "Invalid state";
- return static_cast<uint8>(latest_frame_id_to_reference);
+ return static_cast<uint32>(latest_frame_id_to_reference);
}
Vp8Encoder::Vp8Buffers Vp8Encoder::GetNextBufferToUpdate() {
@@ -267,12 +279,15 @@ Vp8Encoder::Vp8Buffers Vp8Encoder::GetNextBufferToUpdate() {
switch (last_used_vp8_buffer_) {
case kAltRefBuffer:
buffer_to_update = kLastBuffer;
+ VLOG(1) << "VP8 update last buffer";
break;
case kLastBuffer:
buffer_to_update = kGoldenBuffer;
+ VLOG(1) << "VP8 update golden buffer";
break;
case kGoldenBuffer:
buffer_to_update = kAltRefBuffer;
+ VLOG(1) << "VP8 update alt-ref buffer";
break;
case kNoBuffer:
DCHECK(false) << "Invalid state";
@@ -310,16 +325,21 @@ void Vp8Encoder::GetCodecUpdateFlags(Vp8Buffers buffer_to_update,
}
void Vp8Encoder::UpdateRates(uint32 new_bitrate) {
- config_->rc_target_bitrate = new_bitrate / 1000; // In kbit/s.
+ uint32 new_bitrate_kbit = new_bitrate / 1000;
+ if (config_->rc_target_bitrate == new_bitrate_kbit) return;
+
+ config_->rc_target_bitrate = new_bitrate_kbit;
+
// Update encoder context.
if (vpx_codec_enc_config_set(encoder_, config_.get())) {
DCHECK(false) << "Invalid return value";
}
}
-void Vp8Encoder::LatestFrameIdToReference(uint8 frame_id) {
+void Vp8Encoder::LatestFrameIdToReference(uint32 frame_id) {
if (!use_multiple_video_buffers_) return;
+ VLOG(1) << "VP8 ok to reference frame:" << static_cast<int>(frame_id);
for (int i = 0; i < kNumberOfVp8VideoBuffers; ++i) {
if (frame_id == used_buffers_frame_id_[i]) {
acked_frame_buffers_[i] = true;
diff --git a/chromium/media/cast/video_sender/codecs/vp8/vp8_encoder.gypi b/chromium/media/cast/video_sender/codecs/vp8/vp8_encoder.gypi
index 0b12789aa05..fa9c2944a15 100644
--- a/chromium/media/cast/video_sender/codecs/vp8/vp8_encoder.gypi
+++ b/chromium/media/cast/video_sender/codecs/vp8/vp8_encoder.gypi
@@ -12,6 +12,7 @@
'vp8_encoder.h',
], # source
'dependencies': [
+ '<(DEPTH)/ui/gfx/gfx.gyp:gfx',
'<(DEPTH)/third_party/libvpx/libvpx.gyp:libvpx',
],
},
diff --git a/chromium/media/cast/video_sender/codecs/vp8/vp8_encoder.h b/chromium/media/cast/video_sender/codecs/vp8/vp8_encoder.h
index 3b041a01d26..d09cc27dabc 100644
--- a/chromium/media/cast/video_sender/codecs/vp8/vp8_encoder.h
+++ b/chromium/media/cast/video_sender/codecs/vp8/vp8_encoder.h
@@ -10,6 +10,10 @@
#include "media/cast/cast_config.h"
#include "third_party/libvpx/source/libvpx/vpx/vpx_encoder.h"
+namespace media {
+class VideoFrame;
+}
+
// VPX forward declaration.
typedef struct vpx_codec_ctx vpx_enc_ctx_t;
@@ -26,7 +30,7 @@ class Vp8Encoder {
~Vp8Encoder();
// Encode a raw image (as a part of a video stream).
- bool Encode(const I420VideoFrame& input_image,
+ bool Encode(const scoped_refptr<media::VideoFrame>& video_frame,
EncodedVideoFrame* encoded_image);
// Update the encoder with a new target bit rate.
@@ -35,14 +39,14 @@ class Vp8Encoder {
// Set the next frame to be a key frame.
void GenerateKeyFrame();
- void LatestFrameIdToReference(uint8 frame_id);
+ void LatestFrameIdToReference(uint32 frame_id);
private:
enum Vp8Buffers {
kAltRefBuffer = 0,
kGoldenBuffer = 1,
kLastBuffer = 2,
- kNoBuffer = 3 // Note: must be last.
+ kNoBuffer = 3 // Note: must be last.
};
void InitEncode(int number_of_cores);
@@ -54,7 +58,7 @@ class Vp8Encoder {
Vp8Buffers GetNextBufferToUpdate();
// Calculate which previous frame to reference.
- uint8_t GetLatestFrameIdToReference();
+ uint32 GetLatestFrameIdToReference();
// Get encoder flags for our referenced encoder buffers.
void GetCodecReferenceFlags(vpx_codec_flags_t* flags);
@@ -74,8 +78,8 @@ class Vp8Encoder {
bool key_frame_requested_;
int64 timestamp_;
- uint8 last_encoded_frame_id_;
- uint8 used_buffers_frame_id_[kNumberOfVp8VideoBuffers];
+ uint32 last_encoded_frame_id_;
+ uint32 used_buffers_frame_id_[kNumberOfVp8VideoBuffers];
bool acked_frame_buffers_[kNumberOfVp8VideoBuffers];
Vp8Buffers last_used_vp8_buffer_;
int number_of_repeated_buffers_;
diff --git a/chromium/media/cast/video_sender/mock_video_encoder_controller.cc b/chromium/media/cast/video_sender/mock_video_encoder_controller.cc
new file mode 100644
index 00000000000..4f649aa44fe
--- /dev/null
+++ b/chromium/media/cast/video_sender/mock_video_encoder_controller.cc
@@ -0,0 +1,17 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/video_sender/mock_video_encoder_controller.h"
+
+namespace media {
+namespace cast {
+
+MockVideoEncoderController::MockVideoEncoderController() {
+}
+
+MockVideoEncoderController::~MockVideoEncoderController() {
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/video_sender/mock_video_encoder_controller.h b/chromium/media/cast/video_sender/mock_video_encoder_controller.h
index 90b2abdf3bc..cfc58a9eb8f 100644
--- a/chromium/media/cast/video_sender/mock_video_encoder_controller.h
+++ b/chromium/media/cast/video_sender/mock_video_encoder_controller.h
@@ -13,13 +13,16 @@ namespace cast {
class MockVideoEncoderController : public VideoEncoderController {
public:
+ MockVideoEncoderController();
+ virtual ~MockVideoEncoderController();
+
MOCK_METHOD1(SetBitRate, void(int new_bit_rate));
MOCK_METHOD1(SkipNextFrame, void(bool skip_next_frame));
MOCK_METHOD0(GenerateKeyFrame, void());
- MOCK_METHOD1(LatestFrameIdToReference, void(uint8 frame_id));
+ MOCK_METHOD1(LatestFrameIdToReference, void(uint32 frame_id));
MOCK_CONST_METHOD0(NumberOfSkippedFrames, int());
};
diff --git a/chromium/media/cast/video_sender/video_encoder.cc b/chromium/media/cast/video_sender/video_encoder.cc
index 94a296c1bc0..faa78d3a3e7 100644
--- a/chromium/media/cast/video_sender/video_encoder.cc
+++ b/chromium/media/cast/video_sender/video_encoder.cc
@@ -6,15 +6,24 @@
#include "base/bind.h"
#include "base/logging.h"
+#include "base/message_loop/message_loop.h"
+#include "media/base/video_frame.h"
+#include "media/cast/cast_defines.h"
namespace media {
namespace cast {
-VideoEncoder::VideoEncoder(scoped_refptr<CastThread> cast_thread,
+void LogFrameEncodedEvent(CastEnvironment* const cast_environment,
+ const base::TimeTicks& capture_time) {
+ cast_environment->Logging()->InsertFrameEvent(kVideoFrameEncoded,
+ GetVideoRtpTimestamp(capture_time), kFrameIdUnknown);
+}
+
+VideoEncoder::VideoEncoder(scoped_refptr<CastEnvironment> cast_environment,
const VideoSenderConfig& video_config,
uint8 max_unacked_frames)
: video_config_(video_config),
- cast_thread_(cast_thread),
+ cast_environment_(cast_environment),
skip_next_frame_(false),
skip_count_(0) {
if (video_config.codec == kVp8) {
@@ -31,10 +40,10 @@ VideoEncoder::VideoEncoder(scoped_refptr<CastThread> cast_thread,
VideoEncoder::~VideoEncoder() {}
bool VideoEncoder::EncodeVideoFrame(
- const I420VideoFrame* video_frame,
+ const scoped_refptr<media::VideoFrame>& video_frame,
const base::TimeTicks& capture_time,
- const FrameEncodedCallback& frame_encoded_callback,
- const base::Closure frame_release_callback) {
+ const FrameEncodedCallback& frame_encoded_callback) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
if (video_config_.codec != kVp8) return false;
if (skip_next_frame_) {
@@ -43,21 +52,23 @@ bool VideoEncoder::EncodeVideoFrame(
return false;
}
- cast_thread_->PostTask(CastThread::VIDEO_ENCODER, FROM_HERE,
- base::Bind(&VideoEncoder::EncodeVideoFrameEncoderThread, this,
- video_frame, capture_time, dynamic_config_, frame_encoded_callback,
- frame_release_callback));
+ cast_environment_->Logging()->InsertFrameEvent(kVideoFrameSentToEncoder,
+ GetVideoRtpTimestamp(capture_time), kFrameIdUnknown);
+ cast_environment_->PostTask(CastEnvironment::VIDEO_ENCODER, FROM_HERE,
+ base::Bind(&VideoEncoder::EncodeVideoFrameEncoderThread,
+ base::Unretained(this), video_frame, capture_time,
+ dynamic_config_, frame_encoded_callback));
dynamic_config_.key_frame_requested = false;
return true;
}
void VideoEncoder::EncodeVideoFrameEncoderThread(
- const I420VideoFrame* video_frame,
+ const scoped_refptr<media::VideoFrame>& video_frame,
const base::TimeTicks& capture_time,
const CodecDynamicConfig& dynamic_config,
- const FrameEncodedCallback& frame_encoded_callback,
- const base::Closure frame_release_callback) {
+ const FrameEncodedCallback& frame_encoded_callback) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::VIDEO_ENCODER));
if (dynamic_config.key_frame_requested) {
vp8_encoder_->GenerateKeyFrame();
}
@@ -66,10 +77,10 @@ void VideoEncoder::EncodeVideoFrameEncoderThread(
vp8_encoder_->UpdateRates(dynamic_config.bit_rate);
scoped_ptr<EncodedVideoFrame> encoded_frame(new EncodedVideoFrame());
- bool retval = vp8_encoder_->Encode(*video_frame, encoded_frame.get());
+ bool retval = vp8_encoder_->Encode(video_frame, encoded_frame.get());
- // We are done with the video frame release it.
- cast_thread_->PostTask(CastThread::MAIN, FROM_HERE, frame_release_callback);
+ cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
+ base::Bind(LogFrameEncodedEvent, cast_environment_, capture_time));
if (!retval) {
VLOG(1) << "Encoding failed";
@@ -79,32 +90,32 @@ void VideoEncoder::EncodeVideoFrameEncoderThread(
VLOG(1) << "Encoding resulted in an empty frame";
return;
}
- cast_thread_->PostTask(CastThread::MAIN, FROM_HERE,
+ cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
base::Bind(frame_encoded_callback,
base::Passed(&encoded_frame), capture_time));
}
// Inform the encoder about the new target bit rate.
-void VideoEncoder::SetBitRate(int new_bit_rate) OVERRIDE {
+void VideoEncoder::SetBitRate(int new_bit_rate) {
dynamic_config_.bit_rate = new_bit_rate;
}
// Inform the encoder to not encode the next frame.
-void VideoEncoder::SkipNextFrame(bool skip_next_frame) OVERRIDE {
+void VideoEncoder::SkipNextFrame(bool skip_next_frame) {
skip_next_frame_ = skip_next_frame;
}
// Inform the encoder to encode the next frame as a key frame.
-void VideoEncoder::GenerateKeyFrame() OVERRIDE {
+void VideoEncoder::GenerateKeyFrame() {
dynamic_config_.key_frame_requested = true;
}
// Inform the encoder to only reference frames older or equal to frame_id;
-void VideoEncoder::LatestFrameIdToReference(uint8 frame_id) OVERRIDE {
+void VideoEncoder::LatestFrameIdToReference(uint32 frame_id) {
dynamic_config_.latest_frame_id_to_reference = frame_id;
}
-int VideoEncoder::NumberOfSkippedFrames() const OVERRIDE {
+int VideoEncoder::NumberOfSkippedFrames() const {
return skip_count_;
}
diff --git a/chromium/media/cast/video_sender/video_encoder.h b/chromium/media/cast/video_sender/video_encoder.h
index d3b261e1033..559dff16734 100644
--- a/chromium/media/cast/video_sender/video_encoder.h
+++ b/chromium/media/cast/video_sender/video_encoder.h
@@ -5,25 +5,28 @@
#ifndef MEDIA_CAST_VIDEO_SENDER_VIDEO_ENCODER_H_
#define MEDIA_CAST_VIDEO_SENDER_VIDEO_ENCODER_H_
-#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
#include "base/message_loop/message_loop.h"
#include "media/cast/cast_config.h"
-#include "media/cast/cast_thread.h"
+#include "media/cast/cast_environment.h"
#include "media/cast/video_sender/codecs/vp8/vp8_encoder.h"
namespace media {
+class VideoFrame;
+}
+
+namespace media {
namespace cast {
// This object is called external from the main cast thread and internally from
// the video encoder thread.
-class VideoEncoder : public VideoEncoderController,
- public base::RefCountedThreadSafe<VideoEncoder> {
+class VideoEncoder : public VideoEncoderController {
public:
typedef base::Callback<void(scoped_ptr<EncodedVideoFrame>,
const base::TimeTicks&)> FrameEncodedCallback;
- VideoEncoder(scoped_refptr<CastThread> cast_thread,
+ VideoEncoder(scoped_refptr<CastEnvironment> cast_environment,
const VideoSenderConfig& video_config,
uint8 max_unacked_frames);
@@ -36,36 +39,36 @@ class VideoEncoder : public VideoEncoderController,
// the encoder is done with the frame; it does not mean that the encoded frame
// has been sent out.
// Once the encoded frame is ready the frame_encoded_callback is called.
- bool EncodeVideoFrame(const I420VideoFrame* video_frame,
+ bool EncodeVideoFrame(const scoped_refptr<media::VideoFrame>& video_frame,
const base::TimeTicks& capture_time,
- const FrameEncodedCallback& frame_encoded_callback,
- const base::Closure frame_release_callback);
+ const FrameEncodedCallback& frame_encoded_callback);
protected:
struct CodecDynamicConfig {
bool key_frame_requested;
- uint8 latest_frame_id_to_reference;
+ uint32 latest_frame_id_to_reference;
int bit_rate;
};
// The actual encode, called from the video encoder thread.
void EncodeVideoFrameEncoderThread(
- const I420VideoFrame* video_frame,
+ const scoped_refptr<media::VideoFrame>& video_frame,
const base::TimeTicks& capture_time,
const CodecDynamicConfig& dynamic_config,
- const FrameEncodedCallback& frame_encoded_callback,
- const base::Closure frame_release_callback);
+ const FrameEncodedCallback& frame_encoded_callback);
// The following functions are called from the main cast thread.
virtual void SetBitRate(int new_bit_rate) OVERRIDE;
virtual void SkipNextFrame(bool skip_next_frame) OVERRIDE;
virtual void GenerateKeyFrame() OVERRIDE;
- virtual void LatestFrameIdToReference(uint8 frame_id) OVERRIDE;
+ virtual void LatestFrameIdToReference(uint32 frame_id) OVERRIDE;
virtual int NumberOfSkippedFrames() const OVERRIDE;
private:
+ friend class base::RefCountedThreadSafe<VideoEncoder>;
+
const VideoSenderConfig video_config_;
- scoped_refptr<CastThread> cast_thread_;
+ scoped_refptr<CastEnvironment> cast_environment_;
scoped_ptr<Vp8Encoder> vp8_encoder_;
CodecDynamicConfig dynamic_config_;
bool skip_next_frame_;
diff --git a/chromium/media/cast/video_sender/video_encoder_unittest.cc b/chromium/media/cast/video_sender/video_encoder_unittest.cc
index d18a043b73b..b68b8364c43 100644
--- a/chromium/media/cast/video_sender/video_encoder_unittest.cc
+++ b/chromium/media/cast/video_sender/video_encoder_unittest.cc
@@ -7,9 +7,11 @@
#include "base/bind.h"
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
+#include "media/base/video_frame.h"
#include "media/cast/cast_defines.h"
-#include "media/cast/cast_thread.h"
+#include "media/cast/cast_environment.h"
#include "media/cast/test/fake_task_runner.h"
+#include "media/cast/test/video_utility.h"
#include "media/cast/video_sender/video_encoder.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -18,10 +20,7 @@ namespace cast {
using testing::_;
-static void ReleaseFrame(const I420VideoFrame* frame) {
- // Empty since we in this test send in the same frame.
-}
-
+namespace {
class TestVideoEncoderCallback :
public base::RefCountedThreadSafe<TestVideoEncoderCallback> {
public:
@@ -46,18 +45,23 @@ class TestVideoEncoderCallback :
EXPECT_EQ(expected_capture_time_, capture_time);
}
+ protected:
+ virtual ~TestVideoEncoderCallback() {}
+
private:
+ friend class base::RefCountedThreadSafe<TestVideoEncoderCallback>;
+
bool expected_key_frame_;
uint8 expected_frame_id_;
uint8 expected_last_referenced_frame_id_;
base::TimeTicks expected_capture_time_;
};
+} // namespace
class VideoEncoderTest : public ::testing::Test {
protected:
VideoEncoderTest()
- : pixels_(320 * 240, 123),
- test_video_encoder_callback_(new TestVideoEncoderCallback()) {
+ : test_video_encoder_callback_(new TestVideoEncoderCallback()) {
video_config_.sender_ssrc = 1;
video_config_.incoming_feedback_ssrc = 2;
video_config_.rtp_payload_type = 127;
@@ -72,43 +76,36 @@ class VideoEncoderTest : public ::testing::Test {
video_config_.max_frame_rate = 30;
video_config_.max_number_of_video_buffers_used = 3;
video_config_.codec = kVp8;
- video_frame_.width = 320;
- video_frame_.height = 240;
- video_frame_.y_plane.stride = video_frame_.width;
- video_frame_.y_plane.length = video_frame_.width;
- video_frame_.y_plane.data = &(pixels_[0]);
- video_frame_.u_plane.stride = video_frame_.width / 2;
- video_frame_.u_plane.length = video_frame_.width / 2;
- video_frame_.u_plane.data = &(pixels_[0]);
- video_frame_.v_plane.stride = video_frame_.width / 2;
- video_frame_.v_plane.length = video_frame_.width / 2;
- video_frame_.v_plane.data = &(pixels_[0]);
+ gfx::Size size(video_config_.width, video_config_.height);
+ video_frame_ = media::VideoFrame::CreateFrame(VideoFrame::I420,
+ size, gfx::Rect(size), size, base::TimeDelta());
+ PopulateVideoFrame(video_frame_, 123);
}
- ~VideoEncoderTest() {}
+ virtual ~VideoEncoderTest() {}
virtual void SetUp() {
task_runner_ = new test::FakeTaskRunner(&testing_clock_);
- cast_thread_ = new CastThread(task_runner_, task_runner_, task_runner_,
- task_runner_, task_runner_);
+ cast_environment_ = new CastEnvironment(&testing_clock_, task_runner_,
+ task_runner_, task_runner_, task_runner_, task_runner_,
+ GetDefaultCastLoggingConfig());
}
void Configure(uint8 max_unacked_frames) {
- video_encoder_= new VideoEncoder(cast_thread_, video_config_,
- max_unacked_frames);
+ video_encoder_.reset(new VideoEncoder(cast_environment_, video_config_,
+ max_unacked_frames));
video_encoder_controller_ = video_encoder_.get();
}
base::SimpleTestTickClock testing_clock_;
- std::vector<uint8> pixels_;
scoped_refptr<TestVideoEncoderCallback> test_video_encoder_callback_;
VideoSenderConfig video_config_;
scoped_refptr<test::FakeTaskRunner> task_runner_;
- scoped_refptr<VideoEncoder> video_encoder_;
+ scoped_ptr<VideoEncoder> video_encoder_;
VideoEncoderController* video_encoder_controller_;
- I420VideoFrame video_frame_;
+ scoped_refptr<media::VideoFrame> video_frame_;
- scoped_refptr<CastThread> cast_thread_;
+ scoped_refptr<CastEnvironment> cast_environment_;
};
TEST_F(VideoEncoderTest, EncodePattern30fpsRunningOutOfAck) {
@@ -121,22 +118,22 @@ TEST_F(VideoEncoderTest, EncodePattern30fpsRunningOutOfAck) {
base::TimeTicks capture_time;
capture_time += base::TimeDelta::FromMilliseconds(33);
test_video_encoder_callback_->SetExpectedResult(true, 0, 0, capture_time);
- EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
- frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame(video_frame_, capture_time,
+ frame_encoded_callback));
task_runner_->RunTasks();
capture_time += base::TimeDelta::FromMilliseconds(33);
video_encoder_controller_->LatestFrameIdToReference(0);
test_video_encoder_callback_->SetExpectedResult(false, 1, 0, capture_time);
- EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
- frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame(video_frame_, capture_time,
+ frame_encoded_callback));
task_runner_->RunTasks();
capture_time += base::TimeDelta::FromMilliseconds(33);
video_encoder_controller_->LatestFrameIdToReference(1);
test_video_encoder_callback_->SetExpectedResult(false, 2, 1, capture_time);
- EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
- frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame(video_frame_, capture_time,
+ frame_encoded_callback));
task_runner_->RunTasks();
video_encoder_controller_->LatestFrameIdToReference(2);
@@ -144,13 +141,16 @@ TEST_F(VideoEncoderTest, EncodePattern30fpsRunningOutOfAck) {
for (int i = 3; i < 6; ++i) {
capture_time += base::TimeDelta::FromMilliseconds(33);
test_video_encoder_callback_->SetExpectedResult(false, i, 2, capture_time);
- EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
- frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame(video_frame_, capture_time,
+ frame_encoded_callback));
task_runner_->RunTasks();
}
}
-TEST_F(VideoEncoderTest, EncodePattern60fpsRunningOutOfAck) {
+// TODO(pwestin): Re-enabled after redesign the encoder to control number of
+// frames in flight.
+TEST_F(VideoEncoderTest,DISABLED_EncodePattern60fpsRunningOutOfAck) {
+ video_config_.max_number_of_video_buffers_used = 1;
Configure(6);
base::TimeTicks capture_time;
@@ -160,22 +160,22 @@ TEST_F(VideoEncoderTest, EncodePattern60fpsRunningOutOfAck) {
capture_time += base::TimeDelta::FromMilliseconds(33);
test_video_encoder_callback_->SetExpectedResult(true, 0, 0, capture_time);
- EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
- frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame(video_frame_, capture_time,
+ frame_encoded_callback));
task_runner_->RunTasks();
video_encoder_controller_->LatestFrameIdToReference(0);
capture_time += base::TimeDelta::FromMilliseconds(33);
test_video_encoder_callback_->SetExpectedResult(false, 1, 0, capture_time);
- EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
- frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame(video_frame_, capture_time,
+ frame_encoded_callback));
task_runner_->RunTasks();
video_encoder_controller_->LatestFrameIdToReference(1);
capture_time += base::TimeDelta::FromMilliseconds(33);
test_video_encoder_callback_->SetExpectedResult(false, 2, 0, capture_time);
- EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
- frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame(video_frame_, capture_time,
+ frame_encoded_callback));
task_runner_->RunTasks();
video_encoder_controller_->LatestFrameIdToReference(2);
@@ -183,13 +183,15 @@ TEST_F(VideoEncoderTest, EncodePattern60fpsRunningOutOfAck) {
for (int i = 3; i < 9; ++i) {
capture_time += base::TimeDelta::FromMilliseconds(33);
test_video_encoder_callback_->SetExpectedResult(false, i, 2, capture_time);
- EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
- frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame(video_frame_, capture_time,
+ frame_encoded_callback));
task_runner_->RunTasks();
}
}
-TEST_F(VideoEncoderTest, EncodePattern60fps200msDelayRunningOutOfAck) {
+// TODO(pwestin): Re-enabled after redesign the encoder to control number of
+// frames in flight.
+TEST_F(VideoEncoderTest, DISABLED_EncodePattern60fps200msDelayRunningOutOfAck) {
Configure(12);
base::TimeTicks capture_time;
@@ -199,44 +201,44 @@ TEST_F(VideoEncoderTest, EncodePattern60fps200msDelayRunningOutOfAck) {
capture_time += base::TimeDelta::FromMilliseconds(33);
test_video_encoder_callback_->SetExpectedResult(true, 0, 0, capture_time);
- EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
- frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame(video_frame_, capture_time,
+ frame_encoded_callback));
task_runner_->RunTasks();
video_encoder_controller_->LatestFrameIdToReference(0);
capture_time += base::TimeDelta::FromMilliseconds(33);
test_video_encoder_callback_->SetExpectedResult(false, 1, 0, capture_time);
- EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
- frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame(video_frame_, capture_time,
+ frame_encoded_callback));
task_runner_->RunTasks();
video_encoder_controller_->LatestFrameIdToReference(1);
capture_time += base::TimeDelta::FromMilliseconds(33);
test_video_encoder_callback_->SetExpectedResult(false, 2, 0, capture_time);
- EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
- frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame(video_frame_, capture_time,
+ frame_encoded_callback));
task_runner_->RunTasks();
video_encoder_controller_->LatestFrameIdToReference(2);
capture_time += base::TimeDelta::FromMilliseconds(33);
test_video_encoder_callback_->SetExpectedResult(false, 3, 0, capture_time);
- EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
- frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame(video_frame_, capture_time,
+ frame_encoded_callback));
task_runner_->RunTasks();
video_encoder_controller_->LatestFrameIdToReference(3);
capture_time += base::TimeDelta::FromMilliseconds(33);
test_video_encoder_callback_->SetExpectedResult(false, 4, 0, capture_time);
- EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
- frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame(video_frame_, capture_time,
+ frame_encoded_callback));
task_runner_->RunTasks();
video_encoder_controller_->LatestFrameIdToReference(4);
for (int i = 5; i < 17; ++i) {
test_video_encoder_callback_->SetExpectedResult(false, i, 4, capture_time);
- EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
- frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+ EXPECT_TRUE(video_encoder_->EncodeVideoFrame(video_frame_, capture_time,
+ frame_encoded_callback));
task_runner_->RunTasks();
}
}
diff --git a/chromium/media/cast/video_sender/video_sender.cc b/chromium/media/cast/video_sender/video_sender.cc
index 1b422388324..7391fe8e645 100644
--- a/chromium/media/cast/video_sender/video_sender.cc
+++ b/chromium/media/cast/video_sender/video_sender.cc
@@ -9,8 +9,10 @@
#include "base/bind.h"
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
+#include "crypto/encryptor.h"
+#include "crypto/symmetric_key.h"
#include "media/cast/cast_defines.h"
-#include "media/cast/pacing/paced_sender.h"
+#include "media/cast/net/pacing/paced_sender.h"
#include "media/cast/video_sender/video_encoder.h"
namespace media {
@@ -24,29 +26,6 @@ class LocalRtcpVideoSenderFeedback : public RtcpSenderFeedback {
: video_sender_(video_sender) {
}
- virtual void OnReceivedSendReportRequest() OVERRIDE {}
-
- virtual void OnReceivedReportBlock(
- const RtcpReportBlock& report_block) OVERRIDE {}
-
- virtual void OnReceivedRpsi(uint8 payload_type,
- uint64 picture_id) OVERRIDE {
- NOTIMPLEMENTED();
- }
-
- virtual void OnReceivedRemb(uint32 bitrate) OVERRIDE {
- NOTIMPLEMENTED();
- }
-
- virtual void OnReceivedNackRequest(
- const std::list<uint16>& nack_sequence_numbers) OVERRIDE {
- NOTIMPLEMENTED();
- }
-
- virtual void OnReceivedIntraFrameRequest() OVERRIDE {
- video_sender_->OnReceivedIntraFrameRequest();
- }
-
virtual void OnReceivedCastFeedback(
const RtcpCastMessage& cast_feedback) OVERRIDE {
video_sender_->OnReceivedCastFeedback(cast_feedback);
@@ -72,31 +51,32 @@ class LocalRtpVideoSenderStatistics : public RtpSenderStatistics {
};
VideoSender::VideoSender(
- scoped_refptr<CastThread> cast_thread,
+ scoped_refptr<CastEnvironment> cast_environment,
const VideoSenderConfig& video_config,
VideoEncoderController* const video_encoder_controller,
PacedPacketSender* const paced_packet_sender)
- : incoming_feedback_ssrc_(video_config.incoming_feedback_ssrc),
- rtp_max_delay_(
+ : rtp_max_delay_(
base::TimeDelta::FromMilliseconds(video_config.rtp_max_delay_ms)),
max_frame_rate_(video_config.max_frame_rate),
- cast_thread_(cast_thread),
+ cast_environment_(cast_environment),
rtcp_feedback_(new LocalRtcpVideoSenderFeedback(this)),
- rtp_sender_(new RtpSender(NULL, &video_config, paced_packet_sender)),
+ rtp_sender_(new RtpSender(cast_environment, NULL, &video_config,
+ paced_packet_sender)),
last_acked_frame_id_(-1),
last_sent_frame_id_(-1),
- last_sent_key_frame_id_(-1),
duplicate_ack_(0),
last_skip_count_(0),
- congestion_control_(video_config.congestion_control_back_off,
+ congestion_control_(cast_environment->Clock(),
+ video_config.congestion_control_back_off,
video_config.max_bitrate,
video_config.min_bitrate,
video_config.start_bitrate),
- clock_(&default_tick_clock_),
+ initialized_(false),
weak_factory_(this) {
max_unacked_frames_ = static_cast<uint8>(video_config.rtp_max_delay_ms *
- video_config.max_frame_rate / 1000);
- DCHECK(max_unacked_frames_ > 0) << "Invalid argument";
+ video_config.max_frame_rate / 1000) + 1;
+ VLOG(1) << "max_unacked_frames " << static_cast<int>(max_unacked_frames_);
+ DCHECK_GT(max_unacked_frames_, 0) << "Invalid argument";
rtp_video_sender_statistics_.reset(
new LocalRtpVideoSenderStatistics(rtp_sender_.get()));
@@ -105,39 +85,59 @@ VideoSender::VideoSender(
DCHECK(video_encoder_controller) << "Invalid argument";
video_encoder_controller_ = video_encoder_controller;
} else {
- video_encoder_ = new VideoEncoder(cast_thread, video_config,
- max_unacked_frames_);
+ video_encoder_.reset(new VideoEncoder(cast_environment, video_config,
+ max_unacked_frames_));
video_encoder_controller_ = video_encoder_.get();
}
+
+ if (video_config.aes_iv_mask.size() == kAesKeySize &&
+ video_config.aes_key.size() == kAesKeySize) {
+ iv_mask_ = video_config.aes_iv_mask;
+ crypto::SymmetricKey* key = crypto::SymmetricKey::Import(
+ crypto::SymmetricKey::AES, video_config.aes_key);
+ encryptor_.reset(new crypto::Encryptor());
+ encryptor_->Init(key, crypto::Encryptor::CTR, std::string());
+ } else if (video_config.aes_iv_mask.size() != 0 ||
+ video_config.aes_key.size() != 0) {
+ DCHECK(false) << "Invalid crypto configuration";
+ }
+
rtcp_.reset(new Rtcp(
+ cast_environment_,
rtcp_feedback_.get(),
paced_packet_sender,
rtp_video_sender_statistics_.get(),
NULL,
video_config.rtcp_mode,
base::TimeDelta::FromMilliseconds(video_config.rtcp_interval),
- true,
video_config.sender_ssrc,
+ video_config.incoming_feedback_ssrc,
video_config.rtcp_c_name));
-
- rtcp_->SetRemoteSSRC(video_config.incoming_feedback_ssrc);
- ScheduleNextRtcpReport();
- ScheduleNextResendCheck();
- ScheduleNextSkippedFramesCheck();
}
VideoSender::~VideoSender() {}
+void VideoSender::InitializeTimers() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ if (!initialized_) {
+ initialized_ = true;
+ ScheduleNextRtcpReport();
+ ScheduleNextResendCheck();
+ ScheduleNextSkippedFramesCheck();
+ }
+}
+
void VideoSender::InsertRawVideoFrame(
- const I420VideoFrame* video_frame,
- const base::TimeTicks& capture_time,
- const base::Closure callback) {
+ const scoped_refptr<media::VideoFrame>& video_frame,
+ const base::TimeTicks& capture_time) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
DCHECK(video_encoder_.get()) << "Invalid state";
+ cast_environment_->Logging()->InsertFrameEvent(kVideoFrameReceived,
+ GetVideoRtpTimestamp(capture_time), kFrameIdUnknown);
if (!video_encoder_->EncodeVideoFrame(video_frame, capture_time,
base::Bind(&VideoSender::SendEncodedVideoFrameMainThread,
- weak_factory_.GetWeakPtr()), callback)) {
- VLOG(1) << "Failed to InsertRawVideoFrame";
+ weak_factory_.GetWeakPtr()))) {
}
}
@@ -146,9 +146,10 @@ void VideoSender::InsertCodedVideoFrame(const EncodedVideoFrame* encoded_frame,
const base::Closure callback) {
DCHECK(!video_encoder_.get()) << "Invalid state";
DCHECK(encoded_frame) << "Invalid argument";
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
SendEncodedVideoFrame(encoded_frame, capture_time);
- callback.Run();
+ cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE, callback);
}
void VideoSender::SendEncodedVideoFrameMainThread(
@@ -157,119 +158,208 @@ void VideoSender::SendEncodedVideoFrameMainThread(
SendEncodedVideoFrame(video_frame.get(), capture_time);
}
+bool VideoSender::EncryptVideoFrame(const EncodedVideoFrame& video_frame,
+ EncodedVideoFrame* encrypted_frame) {
+ DCHECK(encryptor_) << "Invalid state";
+
+ if (!encryptor_->SetCounter(GetAesNonce(video_frame.frame_id, iv_mask_))) {
+ NOTREACHED() << "Failed to set counter";
+ return false;
+ }
+
+ if (!encryptor_->Encrypt(video_frame.data, &encrypted_frame->data)) {
+ NOTREACHED() << "Encrypt error";
+ return false;
+ }
+ encrypted_frame->codec = video_frame.codec;
+ encrypted_frame->key_frame = video_frame.key_frame;
+ encrypted_frame->frame_id = video_frame.frame_id;
+ encrypted_frame->last_referenced_frame_id =
+ video_frame.last_referenced_frame_id;
+ return true;
+}
+
void VideoSender::SendEncodedVideoFrame(const EncodedVideoFrame* encoded_frame,
const base::TimeTicks& capture_time) {
- last_send_time_ = clock_->NowTicks();
- rtp_sender_->IncomingEncodedVideoFrame(encoded_frame, capture_time);
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ last_send_time_ = cast_environment_->Clock()->NowTicks();
+
+ if (encryptor_) {
+ EncodedVideoFrame encrypted_video_frame;
+
+ if (!EncryptVideoFrame(*encoded_frame, &encrypted_video_frame)) {
+ // Logging already done.
+ return;
+ }
+ rtp_sender_->IncomingEncodedVideoFrame(&encrypted_video_frame,
+ capture_time);
+ } else {
+ rtp_sender_->IncomingEncodedVideoFrame(encoded_frame, capture_time);
+ }
if (encoded_frame->key_frame) {
- last_sent_key_frame_id_ = encoded_frame->frame_id;
+ VLOG(1) << "Send encoded key frame; frame_id:"
+ << static_cast<int>(encoded_frame->frame_id);
}
- last_sent_frame_id_ = encoded_frame->frame_id;
+ last_sent_frame_id_ = static_cast<int>(encoded_frame->frame_id);
UpdateFramesInFlight();
+ InitializeTimers();
}
-void VideoSender::OnReceivedIntraFrameRequest() {
- if (last_sent_key_frame_id_ != -1) {
- uint8 frames_in_flight = static_cast<uint8>(last_sent_frame_id_) -
- static_cast<uint8>(last_sent_key_frame_id_);
- if (frames_in_flight < (max_unacked_frames_ - 1)) return;
- }
- video_encoder_controller_->GenerateKeyFrame();
- last_acked_frame_id_ = -1;
- last_sent_frame_id_ = -1;
-}
-
-void VideoSender::IncomingRtcpPacket(const uint8* packet, int length,
+void VideoSender::IncomingRtcpPacket(const uint8* packet, size_t length,
const base::Closure callback) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
rtcp_->IncomingRtcpPacket(packet, length);
- cast_thread_->PostTask(CastThread::MAIN, FROM_HERE, callback);
+ cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE, callback);
}
void VideoSender::ScheduleNextRtcpReport() {
- base::TimeDelta time_to_next =
- rtcp_->TimeToSendNextRtcpReport() - clock_->NowTicks();
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ base::TimeDelta time_to_next = rtcp_->TimeToSendNextRtcpReport() -
+ cast_environment_->Clock()->NowTicks();
time_to_next = std::max(time_to_next,
base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs));
- cast_thread_->PostDelayedTask(CastThread::MAIN, FROM_HERE,
+ cast_environment_->PostDelayedTask(CastEnvironment::MAIN, FROM_HERE,
base::Bind(&VideoSender::SendRtcpReport, weak_factory_.GetWeakPtr()),
time_to_next);
}
void VideoSender::SendRtcpReport() {
- rtcp_->SendRtcpReport(incoming_feedback_ssrc_);
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+
+ RtcpSenderLogMessage sender_log_message;
+ const FrameRawMap& frame_raw_map =
+ cast_environment_->Logging()->GetFrameRawData();
+
+ FrameRawMap::const_iterator it = frame_raw_map.begin();
+ while (it != frame_raw_map.end()) {
+ RtcpSenderFrameLogMessage frame_message;
+ frame_message.rtp_timestamp = it->first;
+ frame_message.frame_status = kRtcpSenderFrameStatusUnknown;
+ if (it->second.type.empty()) {
+ ++it;
+ continue;
+ }
+ CastLoggingEvent last_event = it->second.type.back();
+ switch (last_event) {
+ case kVideoFrameCaptured:
+ frame_message.frame_status = kRtcpSenderFrameStatusDroppedByFlowControl;
+ break;
+ case kVideoFrameSentToEncoder:
+ frame_message.frame_status = kRtcpSenderFrameStatusDroppedByEncoder;
+ break;
+ case kVideoFrameEncoded:
+ frame_message.frame_status = kRtcpSenderFrameStatusSentToNetwork;
+ break;
+ default:
+ ++it;
+ continue;
+ }
+ ++it;
+ if (it == frame_raw_map.end()) {
+ // Last message on our map; only send if it is kVideoFrameEncoded.
+ if (last_event != kVideoFrameEncoded) {
+ // For other events we will wait for it to finish and report the result
+ // in the next report.
+ break;
+ }
+ }
+ sender_log_message.push_back(frame_message);
+ }
+ rtcp_->SendRtcpFromRtpSender(&sender_log_message);
+ if (!sender_log_message.empty()) {
+ VLOG(1) << "Failed to send all log messages";
+ }
+
+ // TODO(pwestin): When we start pulling out the logging by other means we need
+ // to synchronize this.
+ cast_environment_->Logging()->Reset();
ScheduleNextRtcpReport();
}
void VideoSender::ScheduleNextResendCheck() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
base::TimeDelta time_to_next;
if (last_send_time_.is_null()) {
time_to_next = rtp_max_delay_;
} else {
- time_to_next = last_send_time_ - clock_->NowTicks() + rtp_max_delay_;
+ time_to_next = last_send_time_ - cast_environment_->Clock()->NowTicks() +
+ rtp_max_delay_;
}
time_to_next = std::max(time_to_next,
base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs));
- cast_thread_->PostDelayedTask(CastThread::MAIN, FROM_HERE,
+ cast_environment_->PostDelayedTask(CastEnvironment::MAIN, FROM_HERE,
base::Bind(&VideoSender::ResendCheck, weak_factory_.GetWeakPtr()),
time_to_next);
}
void VideoSender::ResendCheck() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
if (!last_send_time_.is_null() && last_sent_frame_id_ != -1) {
- base::TimeDelta time_to_next =
- last_send_time_ - clock_->NowTicks() + rtp_max_delay_;
-
- if (last_acked_frame_id_ == -1) {
- // We have not received any ack, send a key frame.
- video_encoder_controller_->GenerateKeyFrame();
- last_acked_frame_id_ = -1;
- last_sent_frame_id_ = -1;
- UpdateFramesInFlight();
- } else {
- ResendFrame(static_cast<uint8>(last_acked_frame_id_ + 1));
+ base::TimeDelta time_since_last_send =
+ cast_environment_->Clock()->NowTicks() - last_send_time_;
+ if (time_since_last_send > rtp_max_delay_) {
+ if (last_acked_frame_id_ == -1) {
+ // We have not received any ack, send a key frame.
+ video_encoder_controller_->GenerateKeyFrame();
+ last_acked_frame_id_ = -1;
+ last_sent_frame_id_ = -1;
+ UpdateFramesInFlight();
+ } else {
+ DCHECK_LE(0, last_acked_frame_id_);
+
+ uint32 frame_id = static_cast<uint32>(last_acked_frame_id_ + 1);
+ VLOG(1) << "ACK timeout resend frame:" << static_cast<int>(frame_id);
+ ResendFrame(frame_id);
+ }
}
}
ScheduleNextResendCheck();
}
void VideoSender::ScheduleNextSkippedFramesCheck() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
base::TimeDelta time_to_next;
if (last_checked_skip_count_time_.is_null()) {
time_to_next =
base::TimeDelta::FromMilliseconds(kSkippedFramesCheckPeriodkMs);
} else {
- time_to_next = last_checked_skip_count_time_ - clock_->NowTicks() +
- base::TimeDelta::FromMilliseconds(kSkippedFramesCheckPeriodkMs);
+ time_to_next = last_checked_skip_count_time_ -
+ cast_environment_->Clock()->NowTicks() +
+ base::TimeDelta::FromMilliseconds(kSkippedFramesCheckPeriodkMs);
}
time_to_next = std::max(time_to_next,
base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs));
- cast_thread_->PostDelayedTask(CastThread::MAIN, FROM_HERE,
+ cast_environment_->PostDelayedTask(CastEnvironment::MAIN, FROM_HERE,
base::Bind(&VideoSender::SkippedFramesCheck, weak_factory_.GetWeakPtr()),
time_to_next);
}
void VideoSender::SkippedFramesCheck() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
int skip_count = video_encoder_controller_->NumberOfSkippedFrames();
if (skip_count - last_skip_count_ >
kSkippedFramesThreshold * max_frame_rate_) {
// TODO(pwestin): Propagate this up to the application.
}
last_skip_count_ = skip_count;
- last_checked_skip_count_time_ = clock_->NowTicks();
+ last_checked_skip_count_time_ = cast_environment_->Clock()->NowTicks();
ScheduleNextSkippedFramesCheck();
}
void VideoSender::OnReceivedCastFeedback(const RtcpCastMessage& cast_feedback) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
base::TimeDelta rtt;
base::TimeDelta avg_rtt;
base::TimeDelta min_rtt;
base::TimeDelta max_rtt;
if (rtcp_->Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt)) {
+ cast_environment_->Logging()->InsertGenericEvent(kRttMs,
+ rtt.InMilliseconds());
// Don't use a RTT lower than our average.
rtt = std::max(rtt, avg_rtt);
} else {
@@ -284,30 +374,33 @@ void VideoSender::OnReceivedCastFeedback(const RtcpCastMessage& cast_feedback) {
video_encoder_controller_->LatestFrameIdToReference(
cast_feedback.ack_frame_id_);
- if (static_cast<uint8>(last_acked_frame_id_ + 1) ==
+ if (static_cast<uint32>(last_acked_frame_id_ + 1) ==
cast_feedback.ack_frame_id_) {
uint32 new_bitrate = 0;
if (congestion_control_.OnAck(rtt, &new_bitrate)) {
video_encoder_controller_->SetBitRate(new_bitrate);
}
}
- if (last_acked_frame_id_ == cast_feedback.ack_frame_id_ &&
+ if (static_cast<uint32>(last_acked_frame_id_) == cast_feedback.ack_frame_id_
// We only count duplicate ACKs when we have sent newer frames.
- IsNewerFrameId(last_sent_frame_id_, last_acked_frame_id_)) {
+ && IsNewerFrameId(last_sent_frame_id_, last_acked_frame_id_)) {
duplicate_ack_++;
} else {
duplicate_ack_ = 0;
}
if (duplicate_ack_ >= 2 && duplicate_ack_ % 3 == 2) {
// Resend last ACK + 1 frame.
- resend_frame = static_cast<uint8>(last_acked_frame_id_ + 1);
+ resend_frame = static_cast<uint32>(last_acked_frame_id_ + 1);
}
if (resend_frame != -1) {
- ResendFrame(static_cast<uint8>(resend_frame));
+ DCHECK_LE(0, resend_frame);
+ VLOG(1) << "Received duplicate ACK for frame:"
+ << static_cast<int>(resend_frame);
+ ResendFrame(static_cast<uint32>(resend_frame));
}
} else {
rtp_sender_->ResendPackets(cast_feedback.missing_frames_and_packets_);
- last_send_time_ = clock_->NowTicks();
+ last_send_time_ = cast_environment_->Clock()->NowTicks();
uint32 new_bitrate = 0;
if (congestion_control_.OnNack(rtt, &new_bitrate)) {
@@ -317,15 +410,30 @@ void VideoSender::OnReceivedCastFeedback(const RtcpCastMessage& cast_feedback) {
ReceivedAck(cast_feedback.ack_frame_id_);
}
-void VideoSender::ReceivedAck(uint8 acked_frame_id) {
+void VideoSender::ReceivedAck(uint32 acked_frame_id) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ last_acked_frame_id_ = static_cast<int>(acked_frame_id);
+ cast_environment_->Logging()->InsertGenericEvent(kAckReceived,
+ acked_frame_id);
+ VLOG(1) << "ReceivedAck:" << static_cast<int>(acked_frame_id);
last_acked_frame_id_ = acked_frame_id;
UpdateFramesInFlight();
}
void VideoSender::UpdateFramesInFlight() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
if (last_sent_frame_id_ != -1) {
- uint8 frames_in_flight = static_cast<uint8>(last_sent_frame_id_) -
- static_cast<uint8>(last_acked_frame_id_);
+ DCHECK_LE(0, last_sent_frame_id_);
+ uint32 frames_in_flight;
+ if (last_acked_frame_id_ != -1) {
+ DCHECK_LE(0, last_acked_frame_id_);
+ frames_in_flight = static_cast<uint32>(last_sent_frame_id_) -
+ static_cast<uint32>(last_acked_frame_id_);
+ } else {
+ frames_in_flight = static_cast<uint32>(last_sent_frame_id_) + 1;
+ }
+ VLOG(1) << "Frames in flight; last sent: " << last_sent_frame_id_
+ << " last acked:" << last_acked_frame_id_;
if (frames_in_flight >= max_unacked_frames_) {
video_encoder_controller_->SkipNextFrame(true);
return;
@@ -334,12 +442,13 @@ void VideoSender::UpdateFramesInFlight() {
video_encoder_controller_->SkipNextFrame(false);
}
-void VideoSender::ResendFrame(uint8 resend_frame_id) {
+void VideoSender::ResendFrame(uint32 resend_frame_id) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
MissingFramesAndPacketsMap missing_frames_and_packets;
PacketIdSet missing;
missing_frames_and_packets.insert(std::make_pair(resend_frame_id, missing));
rtp_sender_->ResendPackets(missing_frames_and_packets);
- last_send_time_ = clock_->NowTicks();
+ last_send_time_ = cast_environment_->Clock()->NowTicks();
}
} // namespace cast
diff --git a/chromium/media/cast/video_sender/video_sender.gypi b/chromium/media/cast/video_sender/video_sender.gypi
index 9499066165f..e91a8c97efe 100644
--- a/chromium/media/cast/video_sender/video_sender.gypi
+++ b/chromium/media/cast/video_sender/video_sender.gypi
@@ -21,8 +21,11 @@
'video_sender.cc',
], # source
'dependencies': [
+ '<(DEPTH)/crypto/crypto.gyp:crypto',
'<(DEPTH)/media/cast/rtcp/rtcp.gyp:*',
- '<(DEPTH)/media/cast/rtp_sender/rtp_sender.gyp:*',
+ '<(DEPTH)/media/cast/net/rtp_sender/rtp_sender.gyp:*',
+ '<(DEPTH)/media/media.gyp:media',
+ '<(DEPTH)/media/media.gyp:shared_memory_support',
'congestion_control',
'cast_vp8_encoder',
],
diff --git a/chromium/media/cast/video_sender/video_sender.h b/chromium/media/cast/video_sender/video_sender.h
index 9098e975c4f..eb7b5ea4f21 100644
--- a/chromium/media/cast/video_sender/video_sender.h
+++ b/chromium/media/cast/video_sender/video_sender.h
@@ -10,14 +10,21 @@
#include "base/memory/scoped_ptr.h"
#include "base/memory/weak_ptr.h"
#include "base/threading/non_thread_safe.h"
-#include "base/time/default_tick_clock.h"
#include "base/time/tick_clock.h"
#include "base/time/time.h"
#include "media/cast/cast_config.h"
-#include "media/cast/cast_thread.h"
+#include "media/cast/cast_environment.h"
#include "media/cast/congestion_control/congestion_control.h"
+#include "media/cast/net/rtp_sender/rtp_sender.h"
#include "media/cast/rtcp/rtcp.h"
-#include "media/cast/rtp_sender/rtp_sender.h"
+
+namespace crypto {
+ class Encryptor;
+}
+
+namespace media {
+class VideoFrame;
+}
namespace media {
namespace cast {
@@ -37,7 +44,7 @@ class PacedPacketSender;
class VideoSender : public base::NonThreadSafe,
public base::SupportsWeakPtr<VideoSender> {
public:
- VideoSender(scoped_refptr<CastThread> cast_thread,
+ VideoSender(scoped_refptr<CastEnvironment> cast_environment,
const VideoSenderConfig& video_config,
VideoEncoderController* const video_encoder_controller,
PacedPacketSender* const paced_packet_sender);
@@ -49,9 +56,8 @@ class VideoSender : public base::NonThreadSafe,
// the encoder is done with the frame; it does not mean that the encoded frame
// has been sent out.
void InsertRawVideoFrame(
- const I420VideoFrame* video_frame,
- const base::TimeTicks& capture_time,
- const base::Closure callback);
+ const scoped_refptr<media::VideoFrame>& video_frame,
+ const base::TimeTicks& capture_time);
// The video_frame must be valid until the closure callback is called.
// The closure callback is called from the main thread as soon as
@@ -62,16 +68,9 @@ class VideoSender : public base::NonThreadSafe,
const base::Closure callback);
// Only called from the main cast thread.
- void IncomingRtcpPacket(const uint8* packet, int length,
+ void IncomingRtcpPacket(const uint8* packet, size_t length,
const base::Closure callback);
- void set_clock(base::TickClock* clock) {
- clock_ = clock;
- congestion_control_.set_clock(clock);
- rtcp_->set_clock(clock);
- rtp_sender_->set_clock(clock);
- }
-
protected:
// Protected for testability.
void OnReceivedCastFeedback(const RtcpCastMessage& cast_feedback);
@@ -100,39 +99,43 @@ class VideoSender : public base::NonThreadSafe,
void SendEncodedVideoFrame(const EncodedVideoFrame* video_frame,
const base::TimeTicks& capture_time);
- void OnReceivedIntraFrameRequest();
- void ResendFrame(uint8 resend_frame_id);
- void ReceivedAck(uint8 acked_frame_id);
+ void ResendFrame(uint32 resend_frame_id);
+ void ReceivedAck(uint32 acked_frame_id);
void UpdateFramesInFlight();
void SendEncodedVideoFrameMainThread(
scoped_ptr<EncodedVideoFrame> video_frame,
const base::TimeTicks& capture_time);
- const uint32 incoming_feedback_ssrc_;
+ void InitializeTimers();
+
+ // Caller must allocate the destination |encrypted_video_frame| the data
+ // member will be resized to hold the encrypted size.
+ bool EncryptVideoFrame(const EncodedVideoFrame& encoded_frame,
+ EncodedVideoFrame* encrypted_video_frame);
+
const base::TimeDelta rtp_max_delay_;
const int max_frame_rate_;
- scoped_refptr<CastThread> cast_thread_;
+ scoped_refptr<CastEnvironment> cast_environment_;
scoped_ptr<LocalRtcpVideoSenderFeedback> rtcp_feedback_;
scoped_ptr<LocalRtpVideoSenderStatistics> rtp_video_sender_statistics_;
- scoped_refptr<VideoEncoder> video_encoder_;
+ scoped_ptr<VideoEncoder> video_encoder_;
scoped_ptr<Rtcp> rtcp_;
scoped_ptr<RtpSender> rtp_sender_;
VideoEncoderController* video_encoder_controller_;
uint8 max_unacked_frames_;
+ scoped_ptr<crypto::Encryptor> encryptor_;
+ std::string iv_mask_;
int last_acked_frame_id_;
int last_sent_frame_id_;
- int last_sent_key_frame_id_;
int duplicate_ack_;
base::TimeTicks last_send_time_;
base::TimeTicks last_checked_skip_count_time_;
int last_skip_count_;
CongestionControl congestion_control_;
- base::DefaultTickClock default_tick_clock_;
- base::TickClock* clock_;
-
+ bool initialized_;
base::WeakPtrFactory<VideoSender> weak_factory_;
DISALLOW_COPY_AND_ASSIGN(VideoSender);
diff --git a/chromium/media/cast/video_sender/video_sender_unittest.cc b/chromium/media/cast/video_sender/video_sender_unittest.cc
index 72582a7ff3c..c4968415ffb 100644
--- a/chromium/media/cast/video_sender/video_sender_unittest.cc
+++ b/chromium/media/cast/video_sender/video_sender_unittest.cc
@@ -7,10 +7,12 @@
#include "base/bind.h"
#include "base/memory/scoped_ptr.h"
#include "base/test/simple_test_tick_clock.h"
-#include "media/cast/cast_thread.h"
-#include "media/cast/pacing/mock_paced_packet_sender.h"
-#include "media/cast/pacing/paced_sender.h"
+#include "media/base/video_frame.h"
+#include "media/cast/cast_environment.h"
+#include "media/cast/net/pacing/mock_paced_packet_sender.h"
+#include "media/cast/net/pacing/paced_sender.h"
#include "media/cast/test/fake_task_runner.h"
+#include "media/cast/test/video_utility.h"
#include "media/cast/video_sender/mock_video_encoder_controller.h"
#include "media/cast/video_sender/video_sender.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -19,32 +21,29 @@
namespace media {
namespace cast {
-static const int64 kStartMillisecond = 123456789;
+namespace {
+static const int64 kStartMillisecond = GG_INT64_C(12345678900000);
+static const uint8 kPixelValue = 123;
+static const int kWidth = 320;
+static const int kHeight = 240;
+}
using testing::_;
+using testing::AtLeast;
+namespace {
class PeerVideoSender : public VideoSender {
public:
- PeerVideoSender(scoped_refptr<CastThread> cast_thread,
+ PeerVideoSender(scoped_refptr<CastEnvironment> cast_environment,
const VideoSenderConfig& video_config,
VideoEncoderController* const video_encoder_controller,
PacedPacketSender* const paced_packet_sender)
- : VideoSender(cast_thread, video_config, video_encoder_controller,
- paced_packet_sender) {
+ : VideoSender(cast_environment, video_config,
+ video_encoder_controller, paced_packet_sender) {
}
using VideoSender::OnReceivedCastFeedback;
};
-
-static void ReleaseVideoFrame(const I420VideoFrame* frame) {
- delete [] frame->y_plane.data;
- delete [] frame->u_plane.data;
- delete [] frame->v_plane.data;
- delete frame;
-}
-
-static void ReleaseEncodedFrame(const EncodedVideoFrame* frame) {
- // Do nothing.
-}
+} // namespace
class VideoSenderTest : public ::testing::Test {
protected:
@@ -53,7 +52,7 @@ class VideoSenderTest : public ::testing::Test {
base::TimeDelta::FromMilliseconds(kStartMillisecond));
}
- ~VideoSenderTest() {}
+ virtual ~VideoSenderTest() {}
void InitEncoder(bool external) {
VideoSenderConfig video_config;
@@ -61,56 +60,39 @@ class VideoSenderTest : public ::testing::Test {
video_config.incoming_feedback_ssrc = 2;
video_config.rtp_payload_type = 127;
video_config.use_external_encoder = external;
- video_config.width = 320;
- video_config.height = 240;
+ video_config.width = kWidth;
+ video_config.height = kHeight;
video_config.max_bitrate = 5000000;
video_config.min_bitrate = 1000000;
video_config.start_bitrate = 1000000;
video_config.max_qp = 56;
video_config.min_qp = 0;
video_config.max_frame_rate = 30;
- video_config.max_number_of_video_buffers_used = 3;
+ video_config.max_number_of_video_buffers_used = 1;
video_config.codec = kVp8;
if (external) {
- video_sender_.reset(new PeerVideoSender(cast_thread_, video_config,
- &mock_video_encoder_controller_, &mock_transport_));
+ video_sender_.reset(new PeerVideoSender(cast_environment_,
+ video_config, &mock_video_encoder_controller_, &mock_transport_));
} else {
- video_sender_.reset(new PeerVideoSender(cast_thread_, video_config, NULL,
- &mock_transport_));
+ video_sender_.reset(new PeerVideoSender(cast_environment_, video_config,
+ NULL, &mock_transport_));
}
- video_sender_->set_clock(&testing_clock_);
}
virtual void SetUp() {
task_runner_ = new test::FakeTaskRunner(&testing_clock_);
- cast_thread_ = new CastThread(task_runner_, task_runner_, task_runner_,
- task_runner_, task_runner_);
+ cast_environment_ = new CastEnvironment(&testing_clock_, task_runner_,
+ task_runner_, task_runner_, task_runner_, task_runner_,
+ GetDefaultCastLoggingConfig());
}
- I420VideoFrame* AllocateNewVideoFrame() {
- I420VideoFrame* video_frame = new I420VideoFrame();
- video_frame->width = 320;
- video_frame->height = 240;
-
- video_frame->y_plane.stride = video_frame->width;
- video_frame->y_plane.length = video_frame->width;
- video_frame->y_plane.data =
- new uint8[video_frame->width * video_frame->height];
- memset(video_frame->y_plane.data, 123,
- video_frame->width * video_frame->height);
- video_frame->u_plane.stride = video_frame->width / 2;
- video_frame->u_plane.length = video_frame->width / 2;
- video_frame->u_plane.data =
- new uint8[video_frame->width * video_frame->height / 4];
- memset(video_frame->u_plane.data, 123,
- video_frame->width * video_frame->height / 4);
- video_frame->v_plane.stride = video_frame->width / 2;
- video_frame->v_plane.length = video_frame->width / 2;
- video_frame->v_plane.data =
- new uint8[video_frame->width * video_frame->height / 4];
- memset(video_frame->v_plane.data, 123,
- video_frame->width * video_frame->height / 4);
+ scoped_refptr<media::VideoFrame> GetNewVideoFrame() {
+ gfx::Size size(kWidth, kHeight);
+ scoped_refptr<media::VideoFrame> video_frame =
+ media::VideoFrame::CreateFrame(VideoFrame::I420, size, gfx::Rect(size),
+ size, base::TimeDelta());
+ PopulateVideoFrame(video_frame, kPixelValue);
return video_frame;
}
@@ -119,24 +101,23 @@ class VideoSenderTest : public ::testing::Test {
MockPacedPacketSender mock_transport_;
scoped_refptr<test::FakeTaskRunner> task_runner_;
scoped_ptr<PeerVideoSender> video_sender_;
- scoped_refptr<CastThread> cast_thread_;
+ scoped_refptr<CastEnvironment> cast_environment_;
};
TEST_F(VideoSenderTest, BuiltInEncoder) {
- EXPECT_CALL(mock_transport_, SendPacket(_, _)).Times(1);
+ EXPECT_CALL(mock_transport_, SendPackets(_)).Times(1);
InitEncoder(false);
- I420VideoFrame* video_frame = AllocateNewVideoFrame();
+ scoped_refptr<media::VideoFrame> video_frame = GetNewVideoFrame();
base::TimeTicks capture_time;
- video_sender_->InsertRawVideoFrame(video_frame, capture_time,
- base::Bind(&ReleaseVideoFrame, video_frame));
+ video_sender_->InsertRawVideoFrame(video_frame, capture_time);
task_runner_->RunTasks();
}
TEST_F(VideoSenderTest, ExternalEncoder) {
- EXPECT_CALL(mock_transport_, SendPacket(_, _)).Times(1);
+ EXPECT_CALL(mock_transport_, SendPackets(_)).Times(1);
EXPECT_CALL(mock_video_encoder_controller_, SkipNextFrame(false)).Times(1);
InitEncoder(true);
@@ -147,15 +128,30 @@ TEST_F(VideoSenderTest, ExternalEncoder) {
video_frame.key_frame = true;
video_frame.frame_id = 0;
video_frame.last_referenced_frame_id = 0;
- video_frame.data.insert(video_frame.data.begin(), 123, 1000);
+ video_frame.data.insert(video_frame.data.begin(), 1000, kPixelValue);
video_sender_->InsertCodedVideoFrame(&video_frame, capture_time,
- base::Bind(&ReleaseEncodedFrame, &video_frame));
+ base::Bind(base::DoNothing));
}
TEST_F(VideoSenderTest, RtcpTimer) {
+ EXPECT_CALL(mock_transport_, SendPackets(_)).Times(AtLeast(1));
EXPECT_CALL(mock_transport_, SendRtcpPacket(_)).Times(1);
- InitEncoder(false);
+ EXPECT_CALL(mock_video_encoder_controller_,
+ SkipNextFrame(false)).Times(AtLeast(1));
+ InitEncoder(true);
+
+ EncodedVideoFrame video_frame;
+ base::TimeTicks capture_time;
+
+ video_frame.codec = kVp8;
+ video_frame.key_frame = true;
+ video_frame.frame_id = 0;
+ video_frame.last_referenced_frame_id = 0;
+ video_frame.data.insert(video_frame.data.begin(), 1000, kPixelValue);
+
+ video_sender_->InsertCodedVideoFrame(&video_frame, capture_time,
+ base::Bind(base::DoNothing));
// Make sure that we send at least one RTCP packet.
base::TimeDelta max_rtcp_timeout =
@@ -166,16 +162,15 @@ TEST_F(VideoSenderTest, RtcpTimer) {
}
TEST_F(VideoSenderTest, ResendTimer) {
- EXPECT_CALL(mock_transport_, SendPacket(_, _)).Times(2);
- EXPECT_CALL(mock_transport_, ResendPacket(_, _)).Times(1);
+ EXPECT_CALL(mock_transport_, SendPackets(_)).Times(2);
+ EXPECT_CALL(mock_transport_, ResendPackets(_)).Times(1);
InitEncoder(false);
- I420VideoFrame* video_frame = AllocateNewVideoFrame();
+ scoped_refptr<media::VideoFrame> video_frame = GetNewVideoFrame();
base::TimeTicks capture_time;
- video_sender_->InsertRawVideoFrame(video_frame, capture_time,
- base::Bind(&ReleaseVideoFrame, video_frame));
+ video_sender_->InsertRawVideoFrame(video_frame, capture_time);
task_runner_->RunTasks();
@@ -185,9 +180,8 @@ TEST_F(VideoSenderTest, ResendTimer) {
cast_feedback.ack_frame_id_ = 0;
video_sender_->OnReceivedCastFeedback(cast_feedback);
- video_frame = AllocateNewVideoFrame();
- video_sender_->InsertRawVideoFrame(video_frame, capture_time,
- base::Bind(&ReleaseVideoFrame, video_frame));
+ video_frame = GetNewVideoFrame();
+ video_sender_->InsertRawVideoFrame(video_frame, capture_time);
task_runner_->RunTasks();
diff --git a/chromium/media/cdm/aes_decryptor.cc b/chromium/media/cdm/aes_decryptor.cc
index 33717e03a58..de6f83474f0 100644
--- a/chromium/media/cdm/aes_decryptor.cc
+++ b/chromium/media/cdm/aes_decryptor.cc
@@ -4,15 +4,12 @@
#include "media/cdm/aes_decryptor.h"
+#include <list>
#include <vector>
-#include "base/base64.h"
-#include "base/json/json_reader.h"
#include "base/logging.h"
#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
-#include "base/strings/string_util.h"
-#include "base/values.h"
#include "crypto/encryptor.h"
#include "crypto/symmetric_key.h"
#include "media/base/audio_decoder_config.h"
@@ -20,18 +17,92 @@
#include "media/base/decrypt_config.h"
#include "media/base/video_decoder_config.h"
#include "media/base/video_frame.h"
+#include "media/cdm/json_web_key.h"
namespace media {
-uint32 AesDecryptor::next_session_id_ = 1;
+// Keeps track of the session IDs and DecryptionKeys. The keys are ordered by
+// insertion time (last insertion is first). It takes ownership of the
+// DecryptionKeys.
+class AesDecryptor::SessionIdDecryptionKeyMap {
+ // Use a std::list to actually hold the data. Insertion is always done
+ // at the front, so the "latest" decryption key is always the first one
+ // in the list.
+ typedef std::list<std::pair<uint32, DecryptionKey*> > KeyList;
+
+ public:
+ SessionIdDecryptionKeyMap() {}
+ ~SessionIdDecryptionKeyMap() { STLDeleteValues(&key_list_); }
+
+ // Replaces value if |session_id| is already present, or adds it if not.
+ // This |decryption_key| becomes the latest until another insertion or
+ // |session_id| is erased.
+ void Insert(uint32 session_id, scoped_ptr<DecryptionKey> decryption_key);
+
+ // Deletes the entry for |session_id| if present.
+ void Erase(const uint32 session_id);
+
+ // Returns whether the list is empty
+ bool Empty() const { return key_list_.empty(); }
+
+ // Returns the last inserted DecryptionKey.
+ DecryptionKey* LatestDecryptionKey() {
+ DCHECK(!key_list_.empty());
+ return key_list_.begin()->second;
+ }
+
+ private:
+ // Searches the list for an element with |session_id|.
+ KeyList::iterator Find(const uint32 session_id);
+
+ // Deletes the entry pointed to by |position|.
+ void Erase(KeyList::iterator position);
+
+ KeyList key_list_;
+
+ DISALLOW_COPY_AND_ASSIGN(SessionIdDecryptionKeyMap);
+};
+
+void AesDecryptor::SessionIdDecryptionKeyMap::Insert(
+ uint32 session_id,
+ scoped_ptr<DecryptionKey> decryption_key) {
+ KeyList::iterator it = Find(session_id);
+ if (it != key_list_.end())
+ Erase(it);
+ DecryptionKey* raw_ptr = decryption_key.release();
+ key_list_.push_front(std::make_pair(session_id, raw_ptr));
+}
+
+void AesDecryptor::SessionIdDecryptionKeyMap::Erase(const uint32 session_id) {
+ KeyList::iterator it = Find(session_id);
+ if (it == key_list_.end())
+ return;
+ Erase(it);
+}
+
+AesDecryptor::SessionIdDecryptionKeyMap::KeyList::iterator
+AesDecryptor::SessionIdDecryptionKeyMap::Find(const uint32 session_id) {
+ for (KeyList::iterator it = key_list_.begin(); it != key_list_.end(); ++it) {
+ if (it->first == session_id)
+ return it;
+ }
+ return key_list_.end();
+}
+
+void AesDecryptor::SessionIdDecryptionKeyMap::Erase(
+ KeyList::iterator position) {
+ DCHECK(position->second);
+ delete position->second;
+ key_list_.erase(position);
+}
+
+uint32 AesDecryptor::next_web_session_id_ = 1;
enum ClearBytesBufferSel {
kSrcContainsClearBytes,
kDstContainsClearBytes
};
-typedef std::vector<std::pair<std::string, std::string> > JWKKeys;
-
static void CopySubsamples(const std::vector<SubsampleEntry>& subsamples,
const ClearBytesBufferSel sel,
const uint8* src,
@@ -49,105 +120,6 @@ static void CopySubsamples(const std::vector<SubsampleEntry>& subsamples,
}
}
-// Processes a JSON Web Key to extract the key id and key value. Adds the
-// id/value pair to |jwk_keys| and returns true on success.
-static bool ProcessSymmetricKeyJWK(const DictionaryValue& jwk,
- JWKKeys* jwk_keys) {
- // A symmetric keys JWK looks like the following in JSON:
- // { "kty":"oct",
- // "kid":"AAECAwQFBgcICQoLDA0ODxAREhM=",
- // "k":"FBUWFxgZGhscHR4fICEiIw==" }
- // There may be other properties specified, but they are ignored.
- // Ref: http://tools.ietf.org/html/draft-ietf-jose-json-web-key-14
- // and:
- // http://tools.ietf.org/html/draft-jones-jose-json-private-and-symmetric-key-00
-
- // Have found a JWK, start by checking that it is a symmetric key.
- std::string type;
- if (!jwk.GetString("kty", &type) || type != "oct") {
- DVLOG(1) << "JWK is not a symmetric key";
- return false;
- }
-
- // Get the key id and actual key parameters.
- std::string encoded_key_id;
- std::string encoded_key;
- if (!jwk.GetString("kid", &encoded_key_id)) {
- DVLOG(1) << "Missing 'kid' parameter";
- return false;
- }
- if (!jwk.GetString("k", &encoded_key)) {
- DVLOG(1) << "Missing 'k' parameter";
- return false;
- }
-
- // Key ID and key are base64-encoded strings, so decode them.
- // TODO(jrummell): The JWK spec and the EME spec don't say that 'kid' must be
- // base64-encoded (they don't say anything at all). Verify with the EME spec.
- std::string decoded_key_id;
- std::string decoded_key;
- if (!base::Base64Decode(encoded_key_id, &decoded_key_id) ||
- decoded_key_id.empty()) {
- DVLOG(1) << "Invalid 'kid' value";
- return false;
- }
- if (!base::Base64Decode(encoded_key, &decoded_key) ||
- decoded_key.length() !=
- static_cast<size_t>(DecryptConfig::kDecryptionKeySize)) {
- DVLOG(1) << "Invalid length of 'k' " << decoded_key.length();
- return false;
- }
-
- // Add the decoded key ID and the decoded key to the list.
- jwk_keys->push_back(std::make_pair(decoded_key_id, decoded_key));
- return true;
-}
-
-// Extracts the JSON Web Keys from a JSON Web Key Set. If |input| looks like
-// a valid JWK Set, then true is returned and |jwk_keys| is updated to contain
-// the list of keys found. Otherwise return false.
-static bool ExtractJWKKeys(const std::string& input, JWKKeys* jwk_keys) {
- // TODO(jrummell): The EME spec references a smaller set of allowed ASCII
- // values. Verify with spec that the smaller character set is needed.
- if (!IsStringASCII(input))
- return false;
-
- scoped_ptr<Value> root(base::JSONReader().ReadToValue(input));
- if (!root.get() || root->GetType() != Value::TYPE_DICTIONARY)
- return false;
-
- // A JSON Web Key Set looks like the following in JSON:
- // { "keys": [ JWK1, JWK2, ... ] }
- // (See ProcessSymmetricKeyJWK() for description of JWK.)
- // There may be other properties specified, but they are ignored.
- // Locate the set from the dictionary.
- DictionaryValue* dictionary = static_cast<DictionaryValue*>(root.get());
- ListValue* list_val = NULL;
- if (!dictionary->GetList("keys", &list_val)) {
- DVLOG(1) << "Missing 'keys' parameter or not a list in JWK Set";
- return false;
- }
-
- // Create a local list of keys, so that |jwk_keys| only gets updated on
- // success.
- JWKKeys local_keys;
- for (size_t i = 0; i < list_val->GetSize(); ++i) {
- DictionaryValue* jwk = NULL;
- if (!list_val->GetDictionary(i, &jwk)) {
- DVLOG(1) << "Unable to access 'keys'[" << i << "] in JWK Set";
- return false;
- }
- if (!ProcessSymmetricKeyJWK(*jwk, &local_keys)) {
- DVLOG(1) << "Error from 'keys'[" << i << "]";
- return false;
- }
- }
-
- // Successfully processed all JWKs in the set.
- jwk_keys->swap(local_keys);
- return true;
-}
-
// Decrypts |input| using |key|. Returns a DecoderBuffer with the decrypted
// data if decryption succeeded or NULL if decryption failed.
static scoped_refptr<DecoderBuffer> DecryptData(const DecoderBuffer& input,
@@ -246,22 +218,30 @@ static scoped_refptr<DecoderBuffer> DecryptData(const DecoderBuffer& input,
return output;
}
-AesDecryptor::AesDecryptor(const KeyAddedCB& key_added_cb,
- const KeyErrorCB& key_error_cb,
- const KeyMessageCB& key_message_cb)
- : key_added_cb_(key_added_cb),
- key_error_cb_(key_error_cb),
- key_message_cb_(key_message_cb) {
-}
+AesDecryptor::AesDecryptor(const SessionCreatedCB& session_created_cb,
+ const SessionMessageCB& session_message_cb,
+ const SessionReadyCB& session_ready_cb,
+ const SessionClosedCB& session_closed_cb,
+ const SessionErrorCB& session_error_cb)
+ : session_created_cb_(session_created_cb),
+ session_message_cb_(session_message_cb),
+ session_ready_cb_(session_ready_cb),
+ session_closed_cb_(session_closed_cb),
+ session_error_cb_(session_error_cb) {}
AesDecryptor::~AesDecryptor() {
- STLDeleteValues(&key_map_);
+ key_map_.clear();
}
-bool AesDecryptor::GenerateKeyRequest(const std::string& type,
- const uint8* init_data,
- int init_data_length) {
- std::string session_id_string(base::UintToString(next_session_id_++));
+bool AesDecryptor::CreateSession(uint32 session_id,
+ const std::string& type,
+ const uint8* init_data,
+ int init_data_length) {
+ // Validate that this is a new session.
+ DCHECK(valid_sessions_.find(session_id) == valid_sessions_.end());
+ valid_sessions_.insert(session_id);
+
+ std::string web_session_id_string(base::UintToString(next_web_session_id_++));
// For now, the AesDecryptor does not care about |type|;
// just fire the event with the |init_data| as the request.
@@ -269,74 +249,41 @@ bool AesDecryptor::GenerateKeyRequest(const std::string& type,
if (init_data && init_data_length)
message.assign(init_data, init_data + init_data_length);
- key_message_cb_.Run(session_id_string, message, std::string());
+ session_created_cb_.Run(session_id, web_session_id_string);
+ session_message_cb_.Run(session_id, message, std::string());
return true;
}
-void AesDecryptor::AddKey(const uint8* key,
- int key_length,
- const uint8* init_data,
- int init_data_length,
- const std::string& session_id) {
- CHECK(key);
- CHECK_GT(key_length, 0);
-
- // AddKey() is called from update(), where the key(s) are passed as a JSON
- // Web Key (JWK) set. Each JWK needs to be a symmetric key ('kty' = "oct"),
- // with 'kid' being the base64-encoded key id, and 'k' being the
- // base64-encoded key.
- //
- // For backwards compatibility with v0.1b of the spec (where |key| is the raw
- // key and |init_data| is the key id), if |key| is not valid JSON, then
- // attempt to process it as a raw key.
-
- // TODO(xhwang): Add |session_id| check after we figure out how:
- // https://www.w3.org/Bugs/Public/show_bug.cgi?id=16550
-
- std::string key_string(reinterpret_cast<const char*>(key), key_length);
- JWKKeys jwk_keys;
- if (ExtractJWKKeys(key_string, &jwk_keys)) {
- // Since |key| represents valid JSON, init_data must be empty.
- DCHECK(!init_data);
- DCHECK_EQ(init_data_length, 0);
-
- // Make sure that at least one key was extracted.
- if (jwk_keys.empty()) {
- key_error_cb_.Run(session_id, MediaKeys::kUnknownError, 0);
- return;
- }
- for (JWKKeys::iterator it = jwk_keys.begin() ; it != jwk_keys.end(); ++it) {
- if (!AddDecryptionKey(it->first, it->second)) {
- key_error_cb_.Run(session_id, MediaKeys::kUnknownError, 0);
- return;
- }
- }
- } else {
- // v0.1b backwards compatibility support.
- // TODO(jrummell): Remove this code once v0.1b no longer supported.
+void AesDecryptor::UpdateSession(uint32 session_id,
+ const uint8* response,
+ int response_length) {
+ CHECK(response);
+ CHECK_GT(response_length, 0);
+ DCHECK(valid_sessions_.find(session_id) != valid_sessions_.end());
+
+ std::string key_string(reinterpret_cast<const char*>(response),
+ response_length);
+ KeyIdAndKeyPairs keys;
+ if (!ExtractKeysFromJWKSet(key_string, &keys)) {
+ session_error_cb_.Run(session_id, MediaKeys::kUnknownError, 0);
+ return;
+ }
+
+ // Make sure that at least one key was extracted.
+ if (keys.empty()) {
+ session_error_cb_.Run(session_id, MediaKeys::kUnknownError, 0);
+ return;
+ }
- if (key_string.length() !=
+ for (KeyIdAndKeyPairs::iterator it = keys.begin(); it != keys.end(); ++it) {
+ if (it->second.length() !=
static_cast<size_t>(DecryptConfig::kDecryptionKeySize)) {
DVLOG(1) << "Invalid key length: " << key_string.length();
- key_error_cb_.Run(session_id, MediaKeys::kUnknownError, 0);
+ session_error_cb_.Run(session_id, MediaKeys::kUnknownError, 0);
return;
}
-
- // TODO(xhwang): Fix the decryptor to accept no |init_data|. See
- // http://crbug.com/123265. Until then, ensure a non-empty value is passed.
- static const uint8 kDummyInitData[1] = {0};
- if (!init_data) {
- init_data = kDummyInitData;
- init_data_length = arraysize(kDummyInitData);
- }
-
- // TODO(xhwang): For now, use |init_data| for key ID. Make this more spec
- // compliant later (http://crbug.com/123262, http://crbug.com/123265).
- std::string key_id_string(reinterpret_cast<const char*>(init_data),
- init_data_length);
- if (!AddDecryptionKey(key_id_string, key_string)) {
- // Error logged in AddDecryptionKey()
- key_error_cb_.Run(session_id, MediaKeys::kUnknownError, 0);
+ if (!AddDecryptionKey(session_id, it->first, it->second)) {
+ session_error_cb_.Run(session_id, MediaKeys::kUnknownError, 0);
return;
}
}
@@ -347,10 +294,17 @@ void AesDecryptor::AddKey(const uint8* key,
if (!new_video_key_cb_.is_null())
new_video_key_cb_.Run();
- key_added_cb_.Run(session_id);
+ session_ready_cb_.Run(session_id);
}
-void AesDecryptor::CancelKeyRequest(const std::string& session_id) {
+void AesDecryptor::ReleaseSession(uint32 session_id) {
+ // Validate that this is a reference to an active session and then forget it.
+ std::set<uint32>::iterator it = valid_sessions_.find(session_id);
+ DCHECK(it != valid_sessions_.end());
+ valid_sessions_.erase(it);
+
+ DeleteKeysForSession(session_id);
+ session_closed_cb_.Run(session_id);
}
Decryptor* AesDecryptor::GetDecryptor() {
@@ -441,7 +395,8 @@ void AesDecryptor::DeinitializeDecoder(StreamType stream_type) {
NOTREACHED() << "AesDecryptor does not support audio/video decoding";
}
-bool AesDecryptor::AddDecryptionKey(const std::string& key_id,
+bool AesDecryptor::AddDecryptionKey(const uint32 session_id,
+ const std::string& key_id,
const std::string& key_string) {
scoped_ptr<DecryptionKey> decryption_key(new DecryptionKey(key_string));
if (!decryption_key) {
@@ -455,23 +410,49 @@ bool AesDecryptor::AddDecryptionKey(const std::string& key_id,
}
base::AutoLock auto_lock(key_map_lock_);
- KeyMap::iterator found = key_map_.find(key_id);
- if (found != key_map_.end()) {
- delete found->second;
- key_map_.erase(found);
+ KeyIdToSessionKeysMap::iterator key_id_entry = key_map_.find(key_id);
+ if (key_id_entry != key_map_.end()) {
+ key_id_entry->second->Insert(session_id, decryption_key.Pass());
+ return true;
}
- key_map_[key_id] = decryption_key.release();
+
+ // |key_id| not found, so need to create new entry.
+ scoped_ptr<SessionIdDecryptionKeyMap> inner_map(
+ new SessionIdDecryptionKeyMap());
+ inner_map->Insert(session_id, decryption_key.Pass());
+ key_map_.add(key_id, inner_map.Pass());
return true;
}
AesDecryptor::DecryptionKey* AesDecryptor::GetKey(
const std::string& key_id) const {
base::AutoLock auto_lock(key_map_lock_);
- KeyMap::const_iterator found = key_map_.find(key_id);
- if (found == key_map_.end())
+ KeyIdToSessionKeysMap::const_iterator key_id_found = key_map_.find(key_id);
+ if (key_id_found == key_map_.end())
return NULL;
- return found->second;
+ // Return the key from the "latest" session_id entry.
+ return key_id_found->second->LatestDecryptionKey();
+}
+
+void AesDecryptor::DeleteKeysForSession(const uint32 session_id) {
+ base::AutoLock auto_lock(key_map_lock_);
+
+ // Remove all keys associated with |session_id|. Since the data is optimized
+ // for access in GetKey(), we need to look at each entry in |key_map_|.
+ KeyIdToSessionKeysMap::iterator it = key_map_.begin();
+ while (it != key_map_.end()) {
+ it->second->Erase(session_id);
+ if (it->second->Empty()) {
+ // Need to get rid of the entry for this key_id. This will mess up the
+ // iterator, so we need to increment it first.
+ KeyIdToSessionKeysMap::iterator current = it;
+ ++it;
+ key_map_.erase(current);
+ } else {
+ ++it;
+ }
+ }
}
AesDecryptor::DecryptionKey::DecryptionKey(const std::string& secret)
diff --git a/chromium/media/cdm/aes_decryptor.h b/chromium/media/cdm/aes_decryptor.h
index 3ab4bc0f9f4..a72674c102d 100644
--- a/chromium/media/cdm/aes_decryptor.h
+++ b/chromium/media/cdm/aes_decryptor.h
@@ -5,13 +5,13 @@
#ifndef MEDIA_CRYPTO_AES_DECRYPTOR_H_
#define MEDIA_CRYPTO_AES_DECRYPTOR_H_
+#include <set>
#include <string>
#include "base/basictypes.h"
-#include "base/containers/hash_tables.h"
+#include "base/containers/scoped_ptr_hash_map.h"
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
-#include "base/strings/string_piece.h"
#include "base/synchronization/lock.h"
#include "media/base/decryptor.h"
#include "media/base/media_export.h"
@@ -27,19 +27,22 @@ namespace media {
// encryption must be CTR with a key size of 128bits.
class MEDIA_EXPORT AesDecryptor : public MediaKeys, public Decryptor {
public:
- AesDecryptor(const KeyAddedCB& key_added_cb,
- const KeyErrorCB& key_error_cb,
- const KeyMessageCB& key_message_cb);
+ AesDecryptor(const SessionCreatedCB& session_created_cb,
+ const SessionMessageCB& session_message_cb,
+ const SessionReadyCB& session_ready_cb,
+ const SessionClosedCB& session_closed_cb,
+ const SessionErrorCB& session_error_cb);
virtual ~AesDecryptor();
// MediaKeys implementation.
- virtual bool GenerateKeyRequest(const std::string& type,
- const uint8* init_data,
- int init_data_length) OVERRIDE;
- virtual void AddKey(const uint8* key, int key_length,
- const uint8* init_data, int init_data_length,
- const std::string& session_id) OVERRIDE;
- virtual void CancelKeyRequest(const std::string& session_id) OVERRIDE;
+ virtual bool CreateSession(uint32 session_id,
+ const std::string& type,
+ const uint8* init_data,
+ int init_data_length) OVERRIDE;
+ virtual void UpdateSession(uint32 session_id,
+ const uint8* response,
+ int response_length) OVERRIDE;
+ virtual void ReleaseSession(uint32 session_id) OVERRIDE;
virtual Decryptor* GetDecryptor() OVERRIDE;
// Decryptor implementation.
@@ -86,34 +89,48 @@ class MEDIA_EXPORT AesDecryptor : public MediaKeys, public Decryptor {
DISALLOW_COPY_AND_ASSIGN(DecryptionKey);
};
+ // Keep track of the keys for a key ID. If multiple sessions specify keys
+ // for the same key ID, then the last key inserted is used. The structure is
+ // optimized so that Decrypt() has fast access, at the cost of slow deletion
+ // of keys when a session is released.
+ class SessionIdDecryptionKeyMap;
+
+ // Key ID <-> SessionIdDecryptionKeyMap map.
+ typedef base::ScopedPtrHashMap<std::string, SessionIdDecryptionKeyMap>
+ KeyIdToSessionKeysMap;
+
// Creates a DecryptionKey using |key_string| and associates it with |key_id|.
// Returns true if successful.
- bool AddDecryptionKey(const std::string& key_id,
+ bool AddDecryptionKey(const uint32 session_id,
+ const std::string& key_id,
const std::string& key_string);
// Gets a DecryptionKey associated with |key_id|. The AesDecryptor still owns
// the key. Returns NULL if no key is associated with |key_id|.
DecryptionKey* GetKey(const std::string& key_id) const;
- // Callbacks for firing key events.
- KeyAddedCB key_added_cb_;
- KeyErrorCB key_error_cb_;
- KeyMessageCB key_message_cb_;
+ // Deletes all keys associated with |session_id|.
+ void DeleteKeysForSession(const uint32 session_id);
- // KeyMap owns the DecryptionKey* and must delete them when they are
- // not needed any more.
- typedef base::hash_map<std::string, DecryptionKey*> KeyMap;
+ // Callbacks for firing session events.
+ SessionCreatedCB session_created_cb_;
+ SessionMessageCB session_message_cb_;
+ SessionReadyCB session_ready_cb_;
+ SessionClosedCB session_closed_cb_;
+ SessionErrorCB session_error_cb_;
// Since only Decrypt() is called off the renderer thread, we only need to
// protect |key_map_|, the only member variable that is shared between
// Decrypt() and other methods.
- KeyMap key_map_; // Protected by the |key_map_lock_|.
+ KeyIdToSessionKeysMap key_map_; // Protected by |key_map_lock_|.
mutable base::Lock key_map_lock_; // Protects the |key_map_|.
- // Make session ID unique per renderer by making it static.
- // TODO(xhwang): Make session ID more strictly defined if needed:
- // https://www.w3.org/Bugs/Public/show_bug.cgi?id=16739#c0
- static uint32 next_session_id_;
+ // Keeps track of current valid session IDs.
+ std::set<uint32> valid_sessions_;
+
+ // Make web session ID unique per renderer by making it static. Web session
+ // IDs seen by the app will be "1", "2", etc.
+ static uint32 next_web_session_id_;
NewKeyCB new_audio_key_cb_;
NewKeyCB new_video_key_cb_;
diff --git a/chromium/media/cdm/aes_decryptor_unittest.cc b/chromium/media/cdm/aes_decryptor_unittest.cc
index a4b865c4690..3076d5a3f2e 100644
--- a/chromium/media/cdm/aes_decryptor_unittest.cc
+++ b/chromium/media/cdm/aes_decryptor_unittest.cc
@@ -26,63 +26,73 @@ MATCHER(IsEmpty, "") { return arg.empty(); }
namespace media {
-static const char kClearKeySystem[] = "org.w3.clearkey";
+const uint8 kOriginalData[] = "Original subsample data.";
+const int kOriginalDataSize = 24;
-static const uint8 kOriginalData[] = "Original subsample data.";
-static const int kOriginalDataSize = 24;
+// In the examples below, 'k'(key) has to be 16 bytes, and will always require
+// 2 bytes of padding. 'kid'(keyid) is variable length, and may require 0, 1,
+// or 2 bytes of padding.
-static const uint8 kKeyId[] = {
- // base64 equivalent is AAECAw==
+const uint8 kKeyId[] = {
+ // base64 equivalent is AAECAw
0x00, 0x01, 0x02, 0x03
};
-static const uint8 kKey[] = {
- // base64 equivalent is BAUGBwgJCgsMDQ4PEBESEw==
- 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b,
- 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13
-};
+// Key is 0x0405060708090a0b0c0d0e0f10111213,
+// base64 equivalent is BAUGBwgJCgsMDQ4PEBESEw.
+const char kKeyAsJWK[] =
+ "{"
+ " \"keys\": ["
+ " {"
+ " \"kty\": \"oct\","
+ " \"kid\": \"AAECAw\","
+ " \"k\": \"BAUGBwgJCgsMDQ4PEBESEw\""
+ " }"
+ " ]"
+ "}";
-static const char kKeyAsJWK[] =
+// Same kid as kKeyAsJWK, key to decrypt kEncryptedData2
+const char kKeyAlternateAsJWK[] =
"{"
" \"keys\": ["
" {"
" \"kty\": \"oct\","
- " \"kid\": \"AAECAw==\","
- " \"k\": \"BAUGBwgJCgsMDQ4PEBESEw==\""
+ " \"kid\": \"AAECAw\","
+ " \"k\": \"FBUWFxgZGhscHR4fICEiIw\""
" }"
" ]"
"}";
-static const char kWrongKeyAsJWK[] =
+const char kWrongKeyAsJWK[] =
"{"
" \"keys\": ["
" {"
" \"kty\": \"oct\","
- " \"kid\": \"AAECAw==\","
- " \"k\": \"7u7u7u7u7u7u7u7u7u7u7g==\""
+ " \"kid\": \"AAECAw\","
+ " \"k\": \"7u7u7u7u7u7u7u7u7u7u7g\""
" }"
" ]"
"}";
-static const char kWrongSizedKeyAsJWK[] =
+const char kWrongSizedKeyAsJWK[] =
"{"
" \"keys\": ["
" {"
" \"kty\": \"oct\","
- " \"kid\": \"AAECAw==\","
- " \"k\": \"AAECAw==\""
+ " \"kid\": \"AAECAw\","
+ " \"k\": \"AAECAw\""
" }"
" ]"
"}";
-static const uint8 kIv[] = {
+const uint8 kIv[] = {
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
// kOriginalData encrypted with kKey and kIv but without any subsamples (or
// equivalently using kSubsampleEntriesCypherOnly).
-static const uint8 kEncryptedData[] = {
+const uint8 kEncryptedData[] = {
0x2f, 0x03, 0x09, 0xef, 0x71, 0xaf, 0x31, 0x16,
0xfa, 0x9d, 0x18, 0x43, 0x1e, 0x96, 0x71, 0xb5,
0xbf, 0xf5, 0x30, 0x53, 0x9a, 0x20, 0xdf, 0x95
@@ -90,40 +100,40 @@ static const uint8 kEncryptedData[] = {
// kOriginalData encrypted with kSubsampleKey and kSubsampleIv using
// kSubsampleEntriesNormal.
-static const uint8 kSubsampleEncryptedData[] = {
+const uint8 kSubsampleEncryptedData[] = {
0x4f, 0x72, 0x09, 0x16, 0x09, 0xe6, 0x79, 0xad,
0x70, 0x73, 0x75, 0x62, 0x09, 0xbb, 0x83, 0x1d,
0x4d, 0x08, 0xd7, 0x78, 0xa4, 0xa7, 0xf1, 0x2e
};
-static const uint8 kOriginalData2[] = "Changed Original data.";
+const uint8 kOriginalData2[] = "Changed Original data.";
-static const uint8 kIv2[] = {
+const uint8 kIv2[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
-static const uint8 kKeyId2[] = {
+const uint8 kKeyId2[] = {
// base64 equivalent is AAECAwQFBgcICQoLDA0ODxAREhM=
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0x10, 0x11, 0x12, 0x13
};
-static const char kKey2AsJWK[] =
+const char kKey2AsJWK[] =
"{"
" \"keys\": ["
" {"
" \"kty\": \"oct\","
- " \"kid\": \"AAECAwQFBgcICQoLDA0ODxAREhM=\","
- " \"k\": \"FBUWFxgZGhscHR4fICEiIw==\""
+ " \"kid\": \"AAECAwQFBgcICQoLDA0ODxAREhM\","
+ " \"k\": \"FBUWFxgZGhscHR4fICEiIw\""
" }"
" ]"
"}";
// 'k' in bytes is x14x15x16x17x18x19x1ax1bx1cx1dx1ex1fx20x21x22x23
-static const uint8 kEncryptedData2[] = {
+const uint8 kEncryptedData2[] = {
0x57, 0x66, 0xf4, 0x12, 0x1a, 0xed, 0xb5, 0x79,
0x1c, 0x8e, 0x25, 0xd7, 0x17, 0xe7, 0x5e, 0x16,
0xe3, 0x40, 0x08, 0x27, 0x11, 0xe9
@@ -133,31 +143,31 @@ static const uint8 kEncryptedData2[] = {
// all entries must be equal to kOriginalDataSize to make the subsample entries
// valid.
-static const SubsampleEntry kSubsampleEntriesNormal[] = {
+const SubsampleEntry kSubsampleEntriesNormal[] = {
{ 2, 7 },
{ 3, 11 },
{ 1, 0 }
};
-static const SubsampleEntry kSubsampleEntriesWrongSize[] = {
+const SubsampleEntry kSubsampleEntriesWrongSize[] = {
{ 3, 6 }, // This entry doesn't match the correct entry.
{ 3, 11 },
{ 1, 0 }
};
-static const SubsampleEntry kSubsampleEntriesInvalidTotalSize[] = {
+const SubsampleEntry kSubsampleEntriesInvalidTotalSize[] = {
{ 1, 1000 }, // This entry is too large.
{ 3, 11 },
{ 1, 0 }
};
-static const SubsampleEntry kSubsampleEntriesClearOnly[] = {
+const SubsampleEntry kSubsampleEntriesClearOnly[] = {
{ 7, 0 },
{ 8, 0 },
{ 9, 0 }
};
-static const SubsampleEntry kSubsampleEntriesCypherOnly[] = {
+const SubsampleEntry kSubsampleEntriesCypherOnly[] = {
{ 0, 6 },
{ 0, 8 },
{ 0, 10 }
@@ -187,10 +197,16 @@ static scoped_refptr<DecoderBuffer> CreateEncryptedBuffer(
class AesDecryptorTest : public testing::Test {
public:
AesDecryptorTest()
- : decryptor_(
- base::Bind(&AesDecryptorTest::KeyAdded, base::Unretained(this)),
- base::Bind(&AesDecryptorTest::KeyError, base::Unretained(this)),
- base::Bind(&AesDecryptorTest::KeyMessage, base::Unretained(this))),
+ : decryptor_(base::Bind(&AesDecryptorTest::OnSessionCreated,
+ base::Unretained(this)),
+ base::Bind(&AesDecryptorTest::OnSessionMessage,
+ base::Unretained(this)),
+ base::Bind(&AesDecryptorTest::OnSessionReady,
+ base::Unretained(this)),
+ base::Bind(&AesDecryptorTest::OnSessionClosed,
+ base::Unretained(this)),
+ base::Bind(&AesDecryptorTest::OnSessionError,
+ base::Unretained(this))),
decrypt_cb_(base::Bind(&AesDecryptorTest::BufferDecrypted,
base::Unretained(this))),
original_data_(kOriginalData, kOriginalData + kOriginalDataSize),
@@ -203,58 +219,52 @@ class AesDecryptorTest : public testing::Test {
iv_(kIv, kIv + arraysize(kIv)),
normal_subsample_entries_(
kSubsampleEntriesNormal,
- kSubsampleEntriesNormal + arraysize(kSubsampleEntriesNormal)) {
+ kSubsampleEntriesNormal + arraysize(kSubsampleEntriesNormal)),
+ next_session_id_(1) {
}
protected:
- void GenerateKeyRequest(const std::vector<uint8>& key_id) {
+ // Creates a new session using |key_id|. Returns the session ID.
+ uint32 CreateSession(const std::vector<uint8>& key_id) {
DCHECK(!key_id.empty());
- EXPECT_CALL(*this, KeyMessage(StrNe(std::string()), key_id, ""))
- .WillOnce(SaveArg<0>(&session_id_string_));
- EXPECT_TRUE(decryptor_.GenerateKeyRequest(
- std::string(), &key_id[0], key_id.size()));
+ uint32 session_id = next_session_id_++;
+ EXPECT_CALL(*this, OnSessionCreated(session_id, StrNe(std::string())));
+ EXPECT_CALL(*this, OnSessionMessage(session_id, key_id, ""));
+ EXPECT_TRUE(decryptor_.CreateSession(
+ session_id, std::string(), &key_id[0], key_id.size()));
+ return session_id;
}
- enum AddKeyExpectation {
- KEY_ADDED,
- KEY_ERROR
- };
-
- void AddRawKeyAndExpect(const std::vector<uint8>& key_id,
- const std::vector<uint8>& key,
- AddKeyExpectation result) {
- // TODO(jrummell): Remove once raw keys no longer supported.
- DCHECK(!key_id.empty());
- DCHECK(!key.empty());
-
- if (result == KEY_ADDED) {
- EXPECT_CALL(*this, KeyAdded(session_id_string_));
- } else if (result == KEY_ERROR) {
- EXPECT_CALL(*this, KeyError(session_id_string_,
- MediaKeys::kUnknownError, 0));
- } else {
- NOTREACHED();
- }
-
- decryptor_.AddKey(&key[0], key.size(), &key_id[0], key_id.size(),
- session_id_string_);
+ // Releases the session specified by |session_id|.
+ void ReleaseSession(uint32 session_id) {
+ EXPECT_CALL(*this, OnSessionClosed(session_id));
+ decryptor_.ReleaseSession(session_id);
}
- void AddKeyAndExpect(const std::string& key, AddKeyExpectation result) {
+ enum UpdateSessionExpectation {
+ SESSION_READY,
+ SESSION_ERROR
+ };
+
+ // Updates the session specified by |session_id| with |key|. |result|
+ // tests that the update succeeds or generates an error.
+ void UpdateSessionAndExpect(uint32 session_id,
+ const std::string& key,
+ UpdateSessionExpectation result) {
DCHECK(!key.empty());
- if (result == KEY_ADDED) {
- EXPECT_CALL(*this, KeyAdded(session_id_string_));
- } else if (result == KEY_ERROR) {
- EXPECT_CALL(*this,
- KeyError(session_id_string_, MediaKeys::kUnknownError, 0));
- } else {
- NOTREACHED();
+ switch (result) {
+ case SESSION_READY:
+ EXPECT_CALL(*this, OnSessionReady(session_id));
+ break;
+ case SESSION_ERROR:
+ EXPECT_CALL(*this,
+ OnSessionError(session_id, MediaKeys::kUnknownError, 0));
+ break;
}
- decryptor_.AddKey(reinterpret_cast<const uint8*>(key.c_str()), key.length(),
- NULL, 0,
- session_id_string_);
+ decryptor_.UpdateSession(
+ session_id, reinterpret_cast<const uint8*>(key.c_str()), key.length());
}
MOCK_METHOD2(BufferDecrypted, void(Decryptor::Status,
@@ -264,7 +274,8 @@ class AesDecryptorTest : public testing::Test {
SUCCESS,
DATA_MISMATCH,
DATA_AND_SIZE_MISMATCH,
- DECRYPT_ERROR
+ DECRYPT_ERROR,
+ NO_KEY
};
void DecryptAndExpect(const scoped_refptr<DecoderBuffer>& encrypted,
@@ -272,12 +283,21 @@ class AesDecryptorTest : public testing::Test {
DecryptExpectation result) {
scoped_refptr<DecoderBuffer> decrypted;
- if (result != DECRYPT_ERROR) {
- EXPECT_CALL(*this, BufferDecrypted(Decryptor::kSuccess, NotNull()))
- .WillOnce(SaveArg<1>(&decrypted));
- } else {
- EXPECT_CALL(*this, BufferDecrypted(Decryptor::kError, IsNull()))
- .WillOnce(SaveArg<1>(&decrypted));
+ switch (result) {
+ case SUCCESS:
+ case DATA_MISMATCH:
+ case DATA_AND_SIZE_MISMATCH:
+ EXPECT_CALL(*this, BufferDecrypted(Decryptor::kSuccess, NotNull()))
+ .WillOnce(SaveArg<1>(&decrypted));
+ break;
+ case DECRYPT_ERROR:
+ EXPECT_CALL(*this, BufferDecrypted(Decryptor::kError, IsNull()))
+ .WillOnce(SaveArg<1>(&decrypted));
+ break;
+ case NO_KEY:
+ EXPECT_CALL(*this, BufferDecrypted(Decryptor::kNoKey, IsNull()))
+ .WillOnce(SaveArg<1>(&decrypted));
+ break;
}
decryptor_.Decrypt(Decryptor::kVideo, encrypted, decrypt_cb_);
@@ -300,20 +320,24 @@ class AesDecryptorTest : public testing::Test {
EXPECT_NE(plain_text.size(), decrypted_text.size());
break;
case DECRYPT_ERROR:
+ case NO_KEY:
EXPECT_TRUE(decrypted_text.empty());
break;
}
}
- MOCK_METHOD1(KeyAdded, void(const std::string&));
- MOCK_METHOD3(KeyError, void(const std::string&,
- MediaKeys::KeyError, int));
- MOCK_METHOD3(KeyMessage, void(const std::string& session_id,
- const std::vector<uint8>& message,
- const std::string& default_url));
+ MOCK_METHOD2(OnSessionCreated,
+ void(uint32 session_id, const std::string& web_session_id));
+ MOCK_METHOD3(OnSessionMessage,
+ void(uint32 session_id,
+ const std::vector<uint8>& message,
+ const std::string& default_url));
+ MOCK_METHOD1(OnSessionReady, void(uint32 session_id));
+ MOCK_METHOD1(OnSessionClosed, void(uint32 session_id));
+ MOCK_METHOD3(OnSessionError,
+ void(uint32 session_id, MediaKeys::KeyError, int system_code));
AesDecryptor decryptor_;
- std::string session_id_string_;
AesDecryptor::DecryptCB decrypt_cb_;
// Constants for testing.
@@ -324,24 +348,46 @@ class AesDecryptorTest : public testing::Test {
const std::vector<uint8> iv_;
const std::vector<SubsampleEntry> normal_subsample_entries_;
const std::vector<SubsampleEntry> no_subsample_entries_;
+
+ // Generate new session ID every time
+ uint32 next_session_id_;
};
-TEST_F(AesDecryptorTest, GenerateKeyRequestWithNullInitData) {
- EXPECT_CALL(*this, KeyMessage(StrNe(std::string()), IsEmpty(), ""));
- EXPECT_TRUE(decryptor_.GenerateKeyRequest(std::string(), NULL, 0));
+TEST_F(AesDecryptorTest, CreateSessionWithNullInitData) {
+ uint32 session_id = 8;
+ EXPECT_CALL(*this, OnSessionMessage(session_id, IsEmpty(), ""));
+ EXPECT_CALL(*this, OnSessionCreated(session_id, StrNe(std::string())));
+ EXPECT_TRUE(decryptor_.CreateSession(session_id, std::string(), NULL, 0));
+}
+
+TEST_F(AesDecryptorTest, MultipleCreateSession) {
+ uint32 session_id1 = 10;
+ EXPECT_CALL(*this, OnSessionMessage(session_id1, IsEmpty(), ""));
+ EXPECT_CALL(*this, OnSessionCreated(session_id1, StrNe(std::string())));
+ EXPECT_TRUE(decryptor_.CreateSession(session_id1, std::string(), NULL, 0));
+
+ uint32 session_id2 = 11;
+ EXPECT_CALL(*this, OnSessionMessage(session_id2, IsEmpty(), ""));
+ EXPECT_CALL(*this, OnSessionCreated(session_id2, StrNe(std::string())));
+ EXPECT_TRUE(decryptor_.CreateSession(session_id2, std::string(), NULL, 0));
+
+ uint32 session_id3 = 23;
+ EXPECT_CALL(*this, OnSessionMessage(session_id3, IsEmpty(), ""));
+ EXPECT_CALL(*this, OnSessionCreated(session_id3, StrNe(std::string())));
+ EXPECT_TRUE(decryptor_.CreateSession(session_id3, std::string(), NULL, 0));
}
TEST_F(AesDecryptorTest, NormalDecryption) {
- GenerateKeyRequest(key_id_);
- AddKeyAndExpect(kKeyAsJWK, KEY_ADDED);
+ uint32 session_id = CreateSession(key_id_);
+ UpdateSessionAndExpect(session_id, kKeyAsJWK, SESSION_READY);
scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
encrypted_data_, key_id_, iv_, 0, no_subsample_entries_);
DecryptAndExpect(encrypted_buffer, original_data_, SUCCESS);
}
TEST_F(AesDecryptorTest, DecryptionWithOffset) {
- GenerateKeyRequest(key_id_);
- AddKeyAndExpect(kKeyAsJWK, KEY_ADDED);
+ uint32 session_id = CreateSession(key_id_);
+ UpdateSessionAndExpect(session_id, kKeyAsJWK, SESSION_READY);
scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
encrypted_data_, key_id_, iv_, 23, no_subsample_entries_);
DecryptAndExpect(encrypted_buffer, original_data_, SUCCESS);
@@ -355,8 +401,8 @@ TEST_F(AesDecryptorTest, UnencryptedFrame) {
}
TEST_F(AesDecryptorTest, WrongKey) {
- GenerateKeyRequest(key_id_);
- AddKeyAndExpect(kWrongKeyAsJWK, KEY_ADDED);
+ uint32 session_id = CreateSession(key_id_);
+ UpdateSessionAndExpect(session_id, kWrongKeyAsJWK, SESSION_READY);
scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
encrypted_data_, key_id_, iv_, 0, no_subsample_entries_);
DecryptAndExpect(encrypted_buffer, original_data_, DATA_MISMATCH);
@@ -370,37 +416,33 @@ TEST_F(AesDecryptorTest, NoKey) {
}
TEST_F(AesDecryptorTest, KeyReplacement) {
- GenerateKeyRequest(key_id_);
+ uint32 session_id = CreateSession(key_id_);
scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
encrypted_data_, key_id_, iv_, 0, no_subsample_entries_);
- AddKeyAndExpect(kWrongKeyAsJWK, KEY_ADDED);
+ UpdateSessionAndExpect(session_id, kWrongKeyAsJWK, SESSION_READY);
ASSERT_NO_FATAL_FAILURE(DecryptAndExpect(
encrypted_buffer, original_data_, DATA_MISMATCH));
- AddKeyAndExpect(kKeyAsJWK, KEY_ADDED);
+ UpdateSessionAndExpect(session_id, kKeyAsJWK, SESSION_READY);
ASSERT_NO_FATAL_FAILURE(
DecryptAndExpect(encrypted_buffer, original_data_, SUCCESS));
}
TEST_F(AesDecryptorTest, WrongSizedKey) {
- GenerateKeyRequest(key_id_);
- AddKeyAndExpect(kWrongSizedKeyAsJWK, KEY_ERROR);
-
- // Repeat for a raw key. Use "-1" to create a wrong sized key.
- std::vector<uint8> wrong_sized_key(kKey, kKey + arraysize(kKey) - 1);
- AddRawKeyAndExpect(key_id_, wrong_sized_key, KEY_ERROR);
+ uint32 session_id = CreateSession(key_id_);
+ UpdateSessionAndExpect(session_id, kWrongSizedKeyAsJWK, SESSION_ERROR);
}
TEST_F(AesDecryptorTest, MultipleKeysAndFrames) {
- GenerateKeyRequest(key_id_);
- AddKeyAndExpect(kKeyAsJWK, KEY_ADDED);
+ uint32 session_id = CreateSession(key_id_);
+ UpdateSessionAndExpect(session_id, kKeyAsJWK, SESSION_READY);
scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
encrypted_data_, key_id_, iv_, 10, no_subsample_entries_);
ASSERT_NO_FATAL_FAILURE(
DecryptAndExpect(encrypted_buffer, original_data_, SUCCESS));
- AddKeyAndExpect(kKey2AsJWK, KEY_ADDED);
+ UpdateSessionAndExpect(session_id, kKey2AsJWK, SESSION_READY);
// The first key is still available after we added a second key.
ASSERT_NO_FATAL_FAILURE(
@@ -422,8 +464,8 @@ TEST_F(AesDecryptorTest, MultipleKeysAndFrames) {
}
TEST_F(AesDecryptorTest, CorruptedIv) {
- GenerateKeyRequest(key_id_);
- AddKeyAndExpect(kKeyAsJWK, KEY_ADDED);
+ uint32 session_id = CreateSession(key_id_);
+ UpdateSessionAndExpect(session_id, kKeyAsJWK, SESSION_READY);
std::vector<uint8> bad_iv = iv_;
bad_iv[1]++;
@@ -435,8 +477,8 @@ TEST_F(AesDecryptorTest, CorruptedIv) {
}
TEST_F(AesDecryptorTest, CorruptedData) {
- GenerateKeyRequest(key_id_);
- AddKeyAndExpect(kKeyAsJWK, KEY_ADDED);
+ uint32 session_id = CreateSession(key_id_);
+ UpdateSessionAndExpect(session_id, kKeyAsJWK, SESSION_READY);
std::vector<uint8> bad_data = encrypted_data_;
bad_data[1]++;
@@ -447,16 +489,16 @@ TEST_F(AesDecryptorTest, CorruptedData) {
}
TEST_F(AesDecryptorTest, EncryptedAsUnencryptedFailure) {
- GenerateKeyRequest(key_id_);
- AddKeyAndExpect(kKeyAsJWK, KEY_ADDED);
+ uint32 session_id = CreateSession(key_id_);
+ UpdateSessionAndExpect(session_id, kKeyAsJWK, SESSION_READY);
scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
encrypted_data_, key_id_, std::vector<uint8>(), 0, no_subsample_entries_);
DecryptAndExpect(encrypted_buffer, original_data_, DATA_MISMATCH);
}
TEST_F(AesDecryptorTest, SubsampleDecryption) {
- GenerateKeyRequest(key_id_);
- AddKeyAndExpect(kKeyAsJWK, KEY_ADDED);
+ uint32 session_id = CreateSession(key_id_);
+ UpdateSessionAndExpect(session_id, kKeyAsJWK, SESSION_READY);
scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
subsample_encrypted_data_, key_id_, iv_, 0, normal_subsample_entries_);
DecryptAndExpect(encrypted_buffer, original_data_, SUCCESS);
@@ -466,16 +508,16 @@ TEST_F(AesDecryptorTest, SubsampleDecryption) {
// expect to encounter this in the wild, but since the DecryptConfig doesn't
// disallow such a configuration, it should be covered.
TEST_F(AesDecryptorTest, SubsampleDecryptionWithOffset) {
- GenerateKeyRequest(key_id_);
- AddKeyAndExpect(kKeyAsJWK, KEY_ADDED);
+ uint32 session_id = CreateSession(key_id_);
+ UpdateSessionAndExpect(session_id, kKeyAsJWK, SESSION_READY);
scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
subsample_encrypted_data_, key_id_, iv_, 23, normal_subsample_entries_);
DecryptAndExpect(encrypted_buffer, original_data_, SUCCESS);
}
TEST_F(AesDecryptorTest, SubsampleWrongSize) {
- GenerateKeyRequest(key_id_);
- AddKeyAndExpect(kKeyAsJWK, KEY_ADDED);
+ uint32 session_id = CreateSession(key_id_);
+ UpdateSessionAndExpect(session_id, kKeyAsJWK, SESSION_READY);
std::vector<SubsampleEntry> subsample_entries_wrong_size(
kSubsampleEntriesWrongSize,
@@ -487,8 +529,8 @@ TEST_F(AesDecryptorTest, SubsampleWrongSize) {
}
TEST_F(AesDecryptorTest, SubsampleInvalidTotalSize) {
- GenerateKeyRequest(key_id_);
- AddKeyAndExpect(kKeyAsJWK, KEY_ADDED);
+ uint32 session_id = CreateSession(key_id_);
+ UpdateSessionAndExpect(session_id, kKeyAsJWK, SESSION_READY);
std::vector<SubsampleEntry> subsample_entries_invalid_total_size(
kSubsampleEntriesInvalidTotalSize,
@@ -503,8 +545,8 @@ TEST_F(AesDecryptorTest, SubsampleInvalidTotalSize) {
// No cypher bytes in any of the subsamples.
TEST_F(AesDecryptorTest, SubsampleClearBytesOnly) {
- GenerateKeyRequest(key_id_);
- AddKeyAndExpect(kKeyAsJWK, KEY_ADDED);
+ uint32 session_id = CreateSession(key_id_);
+ UpdateSessionAndExpect(session_id, kKeyAsJWK, SESSION_READY);
std::vector<SubsampleEntry> clear_only_subsample_entries(
kSubsampleEntriesClearOnly,
@@ -517,8 +559,8 @@ TEST_F(AesDecryptorTest, SubsampleClearBytesOnly) {
// No clear bytes in any of the subsamples.
TEST_F(AesDecryptorTest, SubsampleCypherBytesOnly) {
- GenerateKeyRequest(key_id_);
- AddKeyAndExpect(kKeyAsJWK, KEY_ADDED);
+ uint32 session_id = CreateSession(key_id_);
+ UpdateSessionAndExpect(session_id, kKeyAsJWK, SESSION_READY);
std::vector<SubsampleEntry> cypher_only_subsample_entries(
kSubsampleEntriesCypherOnly,
@@ -529,66 +571,151 @@ TEST_F(AesDecryptorTest, SubsampleCypherBytesOnly) {
DecryptAndExpect(encrypted_buffer, original_data_, SUCCESS);
}
+TEST_F(AesDecryptorTest, ReleaseSession) {
+ uint32 session_id = CreateSession(key_id_);
+ scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
+ encrypted_data_, key_id_, iv_, 0, no_subsample_entries_);
+
+ UpdateSessionAndExpect(session_id, kKeyAsJWK, SESSION_READY);
+ ASSERT_NO_FATAL_FAILURE(
+ DecryptAndExpect(encrypted_buffer, original_data_, SUCCESS));
+
+ ReleaseSession(session_id);
+}
+
+TEST_F(AesDecryptorTest, NoKeyAfterReleaseSession) {
+ uint32 session_id = CreateSession(key_id_);
+ scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
+ encrypted_data_, key_id_, iv_, 0, no_subsample_entries_);
+
+ UpdateSessionAndExpect(session_id, kKeyAsJWK, SESSION_READY);
+ ASSERT_NO_FATAL_FAILURE(
+ DecryptAndExpect(encrypted_buffer, original_data_, SUCCESS));
+
+ ReleaseSession(session_id);
+ ASSERT_NO_FATAL_FAILURE(
+ DecryptAndExpect(encrypted_buffer, original_data_, NO_KEY));
+}
+
+TEST_F(AesDecryptorTest, LatestKeyUsed) {
+ uint32 session_id1 = CreateSession(key_id_);
+ scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
+ encrypted_data_, key_id_, iv_, 0, no_subsample_entries_);
+
+ // Add alternate key, buffer should not be decoded properly.
+ UpdateSessionAndExpect(session_id1, kKeyAlternateAsJWK, SESSION_READY);
+ ASSERT_NO_FATAL_FAILURE(
+ DecryptAndExpect(encrypted_buffer, original_data_, DATA_MISMATCH));
+
+ // Create a second session with a correct key value for key_id_.
+ uint32 session_id2 = CreateSession(key_id_);
+ UpdateSessionAndExpect(session_id2, kKeyAsJWK, SESSION_READY);
+
+ // Should be able to decode with latest key.
+ ASSERT_NO_FATAL_FAILURE(
+ DecryptAndExpect(encrypted_buffer, original_data_, SUCCESS));
+}
+
+TEST_F(AesDecryptorTest, LatestKeyUsedAfterReleaseSession) {
+ uint32 session_id1 = CreateSession(key_id_);
+ scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
+ encrypted_data_, key_id_, iv_, 0, no_subsample_entries_);
+ UpdateSessionAndExpect(session_id1, kKeyAsJWK, SESSION_READY);
+ ASSERT_NO_FATAL_FAILURE(
+ DecryptAndExpect(encrypted_buffer, original_data_, SUCCESS));
+
+ // Create a second session with a different key value for key_id_.
+ uint32 session_id2 = CreateSession(key_id_);
+ UpdateSessionAndExpect(session_id2, kKeyAlternateAsJWK, SESSION_READY);
+
+ // Should not be able to decode with new key.
+ ASSERT_NO_FATAL_FAILURE(
+ DecryptAndExpect(encrypted_buffer, original_data_, DATA_MISMATCH));
+
+ // Close second session, should revert to original key.
+ ReleaseSession(session_id2);
+ ASSERT_NO_FATAL_FAILURE(
+ DecryptAndExpect(encrypted_buffer, original_data_, SUCCESS));
+}
+
TEST_F(AesDecryptorTest, JWKKey) {
+ uint32 session_id = CreateSession(key_id_);
+
// Try a simple JWK key (i.e. not in a set)
- const std::string key1 =
+ const std::string kJwkSimple =
"{"
" \"kty\": \"oct\","
- " \"kid\": \"AAECAwQFBgcICQoLDA0ODxAREhM=\","
- " \"k\": \"FBUWFxgZGhscHR4fICEiIw==\""
+ " \"kid\": \"AAECAwQFBgcICQoLDA0ODxAREhM\","
+ " \"k\": \"FBUWFxgZGhscHR4fICEiIw\""
"}";
- AddKeyAndExpect(key1, KEY_ERROR);
+ UpdateSessionAndExpect(session_id, kJwkSimple, SESSION_ERROR);
// Try a key list with multiple entries.
- const std::string key2 =
+ const std::string kJwksMultipleEntries =
"{"
" \"keys\": ["
" {"
" \"kty\": \"oct\","
- " \"kid\": \"AAECAwQFBgcICQoLDA0ODxAREhM=\","
- " \"k\": \"FBUWFxgZGhscHR4fICEiIw==\""
+ " \"kid\": \"AAECAwQFBgcICQoLDA0ODxAREhM\","
+ " \"k\": \"FBUWFxgZGhscHR4fICEiIw\""
" },"
" {"
" \"kty\": \"oct\","
- " \"kid\": \"JCUmJygpKissLS4vMA==\","
- " \"k\":\"MTIzNDU2Nzg5Ojs8PT4/QA==\""
+ " \"kid\": \"JCUmJygpKissLS4vMA\","
+ " \"k\":\"MTIzNDU2Nzg5Ojs8PT4/QA\""
" }"
" ]"
"}";
- AddKeyAndExpect(key2, KEY_ADDED);
+ UpdateSessionAndExpect(session_id, kJwksMultipleEntries, SESSION_READY);
// Try a key with no spaces and some \n plus additional fields.
- const std::string key3 =
+ const std::string kJwksNoSpaces =
"\n\n{\"something\":1,\"keys\":[{\n\n\"kty\":\"oct\",\"alg\":\"A128KW\","
- "\"kid\":\"AAECAwQFBgcICQoLDA0ODxAREhM=\",\"k\":\"GawgguFyGrWKav7AX4VKUg="
- "=\",\"foo\":\"bar\"}]}\n\n";
- AddKeyAndExpect(key3, KEY_ADDED);
+ "\"kid\":\"AAECAwQFBgcICQoLDA0ODxAREhM\",\"k\":\"GawgguFyGrWKav7AX4VKUg"
+ "\",\"foo\":\"bar\"}]}\n\n";
+ UpdateSessionAndExpect(session_id, kJwksNoSpaces, SESSION_READY);
// Try some non-ASCII characters.
- AddKeyAndExpect("This is not ASCII due to \xff\xfe\xfd in it.", KEY_ERROR);
+ UpdateSessionAndExpect(session_id,
+ "This is not ASCII due to \xff\xfe\xfd in it.",
+ SESSION_ERROR);
// Try a badly formatted key. Assume that the JSON parser is fully tested,
// so we won't try a lot of combinations. However, need a test to ensure
// that the code doesn't crash if invalid JSON received.
- AddKeyAndExpect("This is not a JSON key.", KEY_ERROR);
+ UpdateSessionAndExpect(session_id, "This is not a JSON key.", SESSION_ERROR);
// Try passing some valid JSON that is not a dictionary at the top level.
- AddKeyAndExpect("40", KEY_ERROR);
+ UpdateSessionAndExpect(session_id, "40", SESSION_ERROR);
// Try an empty dictionary.
- AddKeyAndExpect("{ }", KEY_ERROR);
+ UpdateSessionAndExpect(session_id, "{ }", SESSION_ERROR);
// Try an empty 'keys' dictionary.
- AddKeyAndExpect("{ \"keys\": [] }", KEY_ERROR);
+ UpdateSessionAndExpect(session_id, "{ \"keys\": [] }", SESSION_ERROR);
// Try with 'keys' not a dictionary.
- AddKeyAndExpect("{ \"keys\":\"1\" }", KEY_ERROR);
+ UpdateSessionAndExpect(session_id, "{ \"keys\":\"1\" }", SESSION_ERROR);
// Try with 'keys' a list of integers.
- AddKeyAndExpect("{ \"keys\": [ 1, 2, 3 ] }", KEY_ERROR);
+ UpdateSessionAndExpect(
+ session_id, "{ \"keys\": [ 1, 2, 3 ] }", SESSION_ERROR);
+
+ // Try padding(=) at end of 'k' base64 string.
+ const std::string kJwksWithPaddedKey =
+ "{"
+ " \"keys\": ["
+ " {"
+ " \"kty\": \"oct\","
+ " \"kid\": \"AAECAw\","
+ " \"k\": \"BAUGBwgJCgsMDQ4PEBESEw==\""
+ " }"
+ " ]"
+ "}";
+ UpdateSessionAndExpect(session_id, kJwksWithPaddedKey, SESSION_ERROR);
- // Try a key missing padding(=) at end of base64 string.
- const std::string key4 =
+ // Try padding(=) at end of 'kid' base64 string.
+ const std::string kJwksWithPaddedKeyId =
"{"
" \"keys\": ["
" {"
@@ -598,44 +725,49 @@ TEST_F(AesDecryptorTest, JWKKey) {
" }"
" ]"
"}";
- AddKeyAndExpect(key4, KEY_ERROR);
+ UpdateSessionAndExpect(session_id, kJwksWithPaddedKeyId, SESSION_ERROR);
- // Try a key ID missing padding(=) at end of base64 string.
- const std::string key5 =
+ // Try a key with invalid base64 encoding.
+ const std::string kJwksWithInvalidBase64 =
"{"
" \"keys\": ["
" {"
" \"kty\": \"oct\","
- " \"kid\": \"AAECAw\","
- " \"k\": \"BAUGBwgJCgsMDQ4PEBESEw==\""
+ " \"kid\": \"!@#$%^&*()\","
+ " \"k\": \"BAUGBwgJCgsMDQ4PEBESEw\""
" }"
" ]"
"}";
- AddKeyAndExpect(key5, KEY_ERROR);
+ UpdateSessionAndExpect(session_id, kJwksWithInvalidBase64, SESSION_ERROR);
- // Try a key with invalid base64 encoding.
- const std::string key6 =
+ // Try a 3-byte 'kid' where no base64 padding is required.
+ // |kJwksMultipleEntries| above has 2 'kid's that require 1 and 2 padding
+ // bytes. Note that 'k' has to be 16 bytes, so it will always require padding.
+ const std::string kJwksWithNoPadding =
"{"
" \"keys\": ["
" {"
" \"kty\": \"oct\","
- " \"kid\": \"!@#$%^&*()==\","
- " \"k\": \"BAUGBwgJCgsMDQ4PEBESEw==\""
+ " \"kid\": \"Kiss\","
+ " \"k\": \"BAUGBwgJCgsMDQ4PEBESEw\""
" }"
" ]"
"}";
- AddKeyAndExpect(key6, KEY_ERROR);
-}
+ UpdateSessionAndExpect(session_id, kJwksWithNoPadding, SESSION_READY);
-TEST_F(AesDecryptorTest, RawKey) {
- // Verify that v0.1b keys (raw key) is still supported. Raw keys are
- // 16 bytes long. Use the undecoded value of |kKey|.
- GenerateKeyRequest(key_id_);
- AddRawKeyAndExpect(
- key_id_, std::vector<uint8>(kKey, kKey + arraysize(kKey)), KEY_ADDED);
- scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
- encrypted_data_, key_id_, iv_, 0, no_subsample_entries_);
- DecryptAndExpect(encrypted_buffer, original_data_, SUCCESS);
+ // Empty key id.
+ const std::string kJwksWithEmptyKeyId =
+ "{"
+ " \"keys\": ["
+ " {"
+ " \"kty\": \"oct\","
+ " \"kid\": \"\","
+ " \"k\": \"BAUGBwgJCgsMDQ4PEBESEw\""
+ " }"
+ " ]"
+ "}";
+ UpdateSessionAndExpect(session_id, kJwksWithEmptyKeyId, SESSION_ERROR);
+ ReleaseSession(session_id);
}
} // namespace media
diff --git a/chromium/media/cdm/json_web_key.cc b/chromium/media/cdm/json_web_key.cc
new file mode 100644
index 00000000000..522f1c9b367
--- /dev/null
+++ b/chromium/media/cdm/json_web_key.cc
@@ -0,0 +1,163 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cdm/json_web_key.h"
+
+#include "base/base64.h"
+#include "base/json/json_reader.h"
+#include "base/json/json_string_value_serializer.h"
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/strings/string_util.h"
+#include "base/values.h"
+
+namespace media {
+
+const char kKeysTag[] = "keys";
+const char kKeyTypeTag[] = "kty";
+const char kSymmetricKeyValue[] = "oct";
+const char kKeyTag[] = "k";
+const char kKeyIdTag[] = "kid";
+const char kBase64Padding = '=';
+
+// Encodes |input| into a base64 string without padding.
+static std::string EncodeBase64(const uint8* input, int input_length) {
+ std::string encoded_text;
+ base::Base64Encode(
+ std::string(reinterpret_cast<const char*>(input), input_length),
+ &encoded_text);
+
+ // Remove any padding characters added by Base64Encode().
+ size_t found = encoded_text.find_last_not_of(kBase64Padding);
+ if (found != std::string::npos)
+ encoded_text.erase(found + 1);
+
+ return encoded_text;
+}
+
+// Decodes an unpadded base64 string. Returns empty string on error.
+static std::string DecodeBase64(const std::string& encoded_text) {
+ // EME spec doesn't allow padding characters.
+ if (encoded_text.find_first_of(kBase64Padding) != std::string::npos)
+ return std::string();
+
+ // Since base::Base64Decode() requires padding characters, add them so length
+ // of |encoded_text| is exactly a multiple of 4.
+ size_t num_last_grouping_chars = encoded_text.length() % 4;
+ std::string modified_text = encoded_text;
+ if (num_last_grouping_chars > 0)
+ modified_text.append(4 - num_last_grouping_chars, kBase64Padding);
+
+ std::string decoded_text;
+ if (!base::Base64Decode(modified_text, &decoded_text))
+ return std::string();
+
+ return decoded_text;
+}
+
+std::string GenerateJWKSet(const uint8* key, int key_length,
+ const uint8* key_id, int key_id_length) {
+ // Both |key| and |key_id| need to be base64 encoded strings in the JWK.
+ std::string key_base64 = EncodeBase64(key, key_length);
+ std::string key_id_base64 = EncodeBase64(key_id, key_id_length);
+
+ // Create the JWK, and wrap it into a JWK Set.
+ scoped_ptr<base::DictionaryValue> jwk(new base::DictionaryValue());
+ jwk->SetString(kKeyTypeTag, kSymmetricKeyValue);
+ jwk->SetString(kKeyTag, key_base64);
+ jwk->SetString(kKeyIdTag, key_id_base64);
+ scoped_ptr<base::ListValue> list(new base::ListValue());
+ list->Append(jwk.release());
+ base::DictionaryValue jwk_set;
+ jwk_set.Set(kKeysTag, list.release());
+
+ // Finally serialize |jwk_set| into a string and return it.
+ std::string serialized_jwk;
+ JSONStringValueSerializer serializer(&serialized_jwk);
+ serializer.Serialize(jwk_set);
+ return serialized_jwk;
+}
+
+// Processes a JSON Web Key to extract the key id and key value. Sets |jwk_key|
+// to the id/value pair and returns true on success.
+static bool ConvertJwkToKeyPair(const DictionaryValue& jwk,
+ KeyIdAndKeyPair* jwk_key) {
+ // Have found a JWK, start by checking that it is a symmetric key.
+ std::string type;
+ if (!jwk.GetString(kKeyTypeTag, &type) || type != kSymmetricKeyValue) {
+ DVLOG(1) << "JWK is not a symmetric key";
+ return false;
+ }
+
+ // Get the key id and actual key parameters.
+ std::string encoded_key_id;
+ std::string encoded_key;
+ if (!jwk.GetString(kKeyIdTag, &encoded_key_id)) {
+ DVLOG(1) << "Missing '" << kKeyIdTag << "' parameter";
+ return false;
+ }
+ if (!jwk.GetString(kKeyTag, &encoded_key)) {
+ DVLOG(1) << "Missing '" << kKeyTag << "' parameter";
+ return false;
+ }
+
+ // Key ID and key are base64-encoded strings, so decode them.
+ std::string raw_key_id = DecodeBase64(encoded_key_id);
+ if (raw_key_id.empty()) {
+ DVLOG(1) << "Invalid '" << kKeyIdTag << "' value: " << encoded_key_id;
+ return false;
+ }
+
+ std::string raw_key = DecodeBase64(encoded_key);
+ if (raw_key.empty()) {
+ DVLOG(1) << "Invalid '" << kKeyTag << "' value: " << encoded_key;
+ return false;
+ }
+
+ // Add the decoded key ID and the decoded key to the list.
+ *jwk_key = std::make_pair(raw_key_id, raw_key);
+ return true;
+}
+
+bool ExtractKeysFromJWKSet(const std::string& jwk_set, KeyIdAndKeyPairs* keys) {
+ if (!IsStringASCII(jwk_set))
+ return false;
+
+ scoped_ptr<Value> root(base::JSONReader().ReadToValue(jwk_set));
+ if (!root.get() || root->GetType() != Value::TYPE_DICTIONARY)
+ return false;
+
+ // Locate the set from the dictionary.
+ DictionaryValue* dictionary = static_cast<DictionaryValue*>(root.get());
+ ListValue* list_val = NULL;
+ if (!dictionary->GetList(kKeysTag, &list_val)) {
+ DVLOG(1) << "Missing '" << kKeysTag
+ << "' parameter or not a list in JWK Set";
+ return false;
+ }
+
+ // Create a local list of keys, so that |jwk_keys| only gets updated on
+ // success.
+ KeyIdAndKeyPairs local_keys;
+ for (size_t i = 0; i < list_val->GetSize(); ++i) {
+ DictionaryValue* jwk = NULL;
+ if (!list_val->GetDictionary(i, &jwk)) {
+ DVLOG(1) << "Unable to access '" << kKeysTag << "'[" << i
+ << "] in JWK Set";
+ return false;
+ }
+ KeyIdAndKeyPair key_pair;
+ if (!ConvertJwkToKeyPair(*jwk, &key_pair)) {
+ DVLOG(1) << "Error from '" << kKeysTag << "'[" << i << "]";
+ return false;
+ }
+ local_keys.push_back(key_pair);
+ }
+
+ // Successfully processed all JWKs in the set.
+ keys->swap(local_keys);
+ return true;
+}
+
+} // namespace media
diff --git a/chromium/media/cdm/json_web_key.h b/chromium/media/cdm/json_web_key.h
new file mode 100644
index 00000000000..cb483aeb8bd
--- /dev/null
+++ b/chromium/media/cdm/json_web_key.h
@@ -0,0 +1,47 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CDM_JSON_WEB_KEY_H_
+#define MEDIA_CDM_JSON_WEB_KEY_H_
+
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+// A JSON Web Key Set looks like the following in JSON:
+// { "keys": [ JWK1, JWK2, ... ] }
+// A symmetric keys JWK looks like the following in JSON:
+// { "kty":"oct",
+// "kid":"AQIDBAUGBwgJCgsMDQ4PEA",
+// "k":"FBUWFxgZGhscHR4fICEiIw" }
+// There may be other properties specified, but they are ignored.
+// Ref: http://tools.ietf.org/html/draft-ietf-jose-json-web-key and:
+// http://tools.ietf.org/html/draft-jones-jose-json-private-and-symmetric-key
+//
+// For EME WD, both 'kid' and 'k' are base64 encoded strings, without trailing
+// padding.
+
+// Vector of [key_id, key_value] pairs. Values are raw binary data, stored in
+// strings for convenience.
+typedef std::pair<std::string, std::string> KeyIdAndKeyPair;
+typedef std::vector<KeyIdAndKeyPair> KeyIdAndKeyPairs;
+
+// Converts a single |key|, |key_id| pair to a JSON Web Key Set.
+MEDIA_EXPORT std::string GenerateJWKSet(const uint8* key, int key_length,
+ const uint8* key_id, int key_id_length);
+
+// Extracts the JSON Web Keys from a JSON Web Key Set. If |input| looks like
+// a valid JWK Set, then true is returned and |keys| is updated to contain
+// the list of keys found. Otherwise return false.
+MEDIA_EXPORT bool ExtractKeysFromJWKSet(const std::string& jwk_set,
+ KeyIdAndKeyPairs* keys);
+
+} // namespace media
+
+#endif // MEDIA_CDM_JSON_WEB_KEY_H_
diff --git a/chromium/media/cdm/json_web_key_unittest.cc b/chromium/media/cdm/json_web_key_unittest.cc
new file mode 100644
index 00000000000..1018d173c76
--- /dev/null
+++ b/chromium/media/cdm/json_web_key_unittest.cc
@@ -0,0 +1,186 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cdm/json_web_key.h"
+
+#include "base/logging.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+class JSONWebKeyTest : public testing::Test {
+ public:
+ JSONWebKeyTest() {}
+
+ protected:
+ void ExtractJWKKeysAndExpect(const std::string& jwk,
+ bool expected_result,
+ size_t expected_number_of_keys) {
+ DCHECK(!jwk.empty());
+ KeyIdAndKeyPairs keys;
+ EXPECT_EQ(expected_result, ExtractKeysFromJWKSet(jwk, &keys));
+ EXPECT_EQ(expected_number_of_keys, keys.size());
+ }
+};
+
+TEST_F(JSONWebKeyTest, GenerateJWKSet) {
+ const uint8 data1[] = { 0x01, 0x02 };
+ const uint8 data2[] = { 0x01, 0x02, 0x03, 0x04 };
+ const uint8 data3[] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+ 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10 };
+
+ EXPECT_EQ("{\"keys\":[{\"k\":\"AQI\",\"kid\":\"AQI\",\"kty\":\"oct\"}]}",
+ GenerateJWKSet(data1, arraysize(data1), data1, arraysize(data1)));
+ EXPECT_EQ(
+ "{\"keys\":[{\"k\":\"AQIDBA\",\"kid\":\"AQIDBA\",\"kty\":\"oct\"}]}",
+ GenerateJWKSet(data2, arraysize(data2), data2, arraysize(data2)));
+ EXPECT_EQ("{\"keys\":[{\"k\":\"AQI\",\"kid\":\"AQIDBA\",\"kty\":\"oct\"}]}",
+ GenerateJWKSet(data1, arraysize(data1), data2, arraysize(data2)));
+ EXPECT_EQ("{\"keys\":[{\"k\":\"AQIDBA\",\"kid\":\"AQI\",\"kty\":\"oct\"}]}",
+ GenerateJWKSet(data2, arraysize(data2), data1, arraysize(data1)));
+ EXPECT_EQ(
+ "{\"keys\":[{\"k\":\"AQIDBAUGBwgJCgsMDQ4PEA\",\"kid\":"
+ "\"AQIDBAUGBwgJCgsMDQ4PEA\",\"kty\":\"oct\"}]}",
+ GenerateJWKSet(data3, arraysize(data3), data3, arraysize(data3)));
+}
+
+TEST_F(JSONWebKeyTest, ExtractJWKKeys) {
+ // Try a simple JWK key (i.e. not in a set)
+ const std::string kJwkSimple =
+ "{"
+ " \"kty\": \"oct\","
+ " \"kid\": \"AAECAwQFBgcICQoLDA0ODxAREhM\","
+ " \"k\": \"FBUWFxgZGhscHR4fICEiIw\""
+ "}";
+ ExtractJWKKeysAndExpect(kJwkSimple, false, 0);
+
+ // Try a key list with multiple entries.
+ const std::string kJwksMultipleEntries =
+ "{"
+ " \"keys\": ["
+ " {"
+ " \"kty\": \"oct\","
+ " \"kid\": \"AAECAwQFBgcICQoLDA0ODxAREhM\","
+ " \"k\": \"FBUWFxgZGhscHR4fICEiIw\""
+ " },"
+ " {"
+ " \"kty\": \"oct\","
+ " \"kid\": \"JCUmJygpKissLS4vMA\","
+ " \"k\":\"MTIzNDU2Nzg5Ojs8PT4/QA\""
+ " }"
+ " ]"
+ "}";
+ ExtractJWKKeysAndExpect(kJwksMultipleEntries, true, 2);
+
+ // Try a key with no spaces and some \n plus additional fields.
+ const std::string kJwksNoSpaces =
+ "\n\n{\"something\":1,\"keys\":[{\n\n\"kty\":\"oct\",\"alg\":\"A128KW\","
+ "\"kid\":\"AAECAwQFBgcICQoLDA0ODxAREhM\",\"k\":\"GawgguFyGrWKav7AX4VKUg"
+ "\",\"foo\":\"bar\"}]}\n\n";
+ ExtractJWKKeysAndExpect(kJwksNoSpaces, true, 1);
+
+ // Try some non-ASCII characters.
+ ExtractJWKKeysAndExpect(
+ "This is not ASCII due to \xff\xfe\xfd in it.", false, 0);
+
+ // Try some non-ASCII characters in an otherwise valid JWK.
+ const std::string kJwksInvalidCharacters =
+ "\n\n{\"something\":1,\"keys\":[{\n\n\"kty\":\"oct\",\"alg\":\"A128KW\","
+ "\"kid\":\"AAECAwQFBgcICQoLDA0ODxAREhM\",\"k\":\"\xff\xfe\xfd"
+ "\",\"foo\":\"bar\"}]}\n\n";
+ ExtractJWKKeysAndExpect(kJwksInvalidCharacters, false, 0);
+
+ // Try a badly formatted key. Assume that the JSON parser is fully tested,
+ // so we won't try a lot of combinations. However, need a test to ensure
+ // that the code doesn't crash if invalid JSON received.
+ ExtractJWKKeysAndExpect("This is not a JSON key.", false, 0);
+
+ // Try passing some valid JSON that is not a dictionary at the top level.
+ ExtractJWKKeysAndExpect("40", false, 0);
+
+ // Try an empty dictionary.
+ ExtractJWKKeysAndExpect("{ }", false, 0);
+
+ // Try an empty 'keys' dictionary.
+ ExtractJWKKeysAndExpect("{ \"keys\": [] }", true, 0);
+
+ // Try with 'keys' not a dictionary.
+ ExtractJWKKeysAndExpect("{ \"keys\":\"1\" }", false, 0);
+
+ // Try with 'keys' a list of integers.
+ ExtractJWKKeysAndExpect("{ \"keys\": [ 1, 2, 3 ] }", false, 0);
+
+ // Try padding(=) at end of 'k' base64 string.
+ const std::string kJwksWithPaddedKey =
+ "{"
+ " \"keys\": ["
+ " {"
+ " \"kty\": \"oct\","
+ " \"kid\": \"AAECAw\","
+ " \"k\": \"BAUGBwgJCgsMDQ4PEBESEw==\""
+ " }"
+ " ]"
+ "}";
+ ExtractJWKKeysAndExpect(kJwksWithPaddedKey, false, 0);
+
+ // Try padding(=) at end of 'kid' base64 string.
+ const std::string kJwksWithPaddedKeyId =
+ "{"
+ " \"keys\": ["
+ " {"
+ " \"kty\": \"oct\","
+ " \"kid\": \"AAECAw==\","
+ " \"k\": \"BAUGBwgJCgsMDQ4PEBESEw\""
+ " }"
+ " ]"
+ "}";
+ ExtractJWKKeysAndExpect(kJwksWithPaddedKeyId, false, 0);
+
+ // Try a key with invalid base64 encoding.
+ const std::string kJwksWithInvalidBase64 =
+ "{"
+ " \"keys\": ["
+ " {"
+ " \"kty\": \"oct\","
+ " \"kid\": \"!@#$%^&*()\","
+ " \"k\": \"BAUGBwgJCgsMDQ4PEBESEw\""
+ " }"
+ " ]"
+ "}";
+ ExtractJWKKeysAndExpect(kJwksWithInvalidBase64, false, 0);
+
+ // Empty key id.
+ const std::string kJwksWithEmptyKeyId =
+ "{"
+ " \"keys\": ["
+ " {"
+ " \"kty\": \"oct\","
+ " \"kid\": \"\","
+ " \"k\": \"BAUGBwgJCgsMDQ4PEBESEw\""
+ " }"
+ " ]"
+ "}";
+ ExtractJWKKeysAndExpect(kJwksWithEmptyKeyId, false, 0);
+
+ // Try a list with multiple keys with the same kid.
+ const std::string kJwksDuplicateKids =
+ "{"
+ " \"keys\": ["
+ " {"
+ " \"kty\": \"oct\","
+ " \"kid\": \"JCUmJygpKissLS4vMA\","
+ " \"k\": \"FBUWFxgZGhscHR4fICEiIw\""
+ " },"
+ " {"
+ " \"kty\": \"oct\","
+ " \"kid\": \"JCUmJygpKissLS4vMA\","
+ " \"k\":\"MTIzNDU2Nzg5Ojs8PT4/QA\""
+ " }"
+ " ]"
+ "}";
+ ExtractJWKKeysAndExpect(kJwksDuplicateKids, true, 2);
+}
+
+} // namespace media
+
diff --git a/chromium/media/cdm/key_system_names.cc b/chromium/media/cdm/key_system_names.cc
new file mode 100644
index 00000000000..b9eceb2f4db
--- /dev/null
+++ b/chromium/media/cdm/key_system_names.cc
@@ -0,0 +1,27 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cdm/key_system_names.h"
+
+#include <string>
+
+namespace media {
+
+const char kPrefixedClearKey[] = "webkit-org.w3.clearkey";
+const char kUnprefixedClearKey[] = "org.w3.clearkey";
+const char kExternalClearKey[] = "org.chromium.externalclearkey";
+
+static bool IsParentKeySystemOf(const std::string& parent_key_system,
+ const std::string& key_system) {
+ std::string prefix = parent_key_system + '.';
+ return key_system.substr(0, prefix.size()) == prefix;
+}
+
+
+bool IsExternalClearKey(const std::string& key_system) {
+ return key_system == kExternalClearKey ||
+ IsParentKeySystemOf(kExternalClearKey, key_system);
+}
+
+} // namespace media
diff --git a/chromium/media/cdm/key_system_names.h b/chromium/media/cdm/key_system_names.h
new file mode 100644
index 00000000000..1b2686911c4
--- /dev/null
+++ b/chromium/media/cdm/key_system_names.h
@@ -0,0 +1,33 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CDM_KEY_SYSTEM_NAMES_H_
+#define MEDIA_CDM_KEY_SYSTEM_NAMES_H_
+
+#include <string>
+
+#include "media/base/media_export.h"
+
+namespace media {
+
+// TODO(jrummell): Change other uses of Clear Key to use this common value.
+
+// The key system names for Clear Key.
+MEDIA_EXPORT extern const char kPrefixedClearKey[];
+MEDIA_EXPORT extern const char kUnprefixedClearKey[];
+
+// The key system name for External Clear Key.
+MEDIA_EXPORT extern const char kExternalClearKey[];
+
+// Returns true if |key_system| is Clear Key, false otherwise.
+MEDIA_EXPORT inline bool IsClearKey(const std::string& key_system) {
+ return key_system == kPrefixedClearKey || key_system == kUnprefixedClearKey;
+}
+
+// Returns true if |key_system| is External Clear Key, false otherwise.
+MEDIA_EXPORT bool IsExternalClearKey(const std::string& key_system);
+
+} // namespace media
+
+#endif // MEDIA_CDM_KEY_SYSTEM_NAMES_H_
diff --git a/chromium/media/cdm/ppapi/api/OWNERS b/chromium/media/cdm/ppapi/api/OWNERS
new file mode 100644
index 00000000000..554a4be64a0
--- /dev/null
+++ b/chromium/media/cdm/ppapi/api/OWNERS
@@ -0,0 +1,2 @@
+ddorwin@chromium.org
+xhwang@chromium.org
diff --git a/chromium/media/cdm/ppapi/api/codereview.settings b/chromium/media/cdm/ppapi/api/codereview.settings
new file mode 100644
index 00000000000..03fa0d50d95
--- /dev/null
+++ b/chromium/media/cdm/ppapi/api/codereview.settings
@@ -0,0 +1,5 @@
+CODE_REVIEW_SERVER: codereview.chromium.org
+CC_LIST: cdm-api-reviews@chromium.org, feature-media-reviews@chromium.org
+VIEW_VC: https://src.chromium.org/viewvc/chrome?view=rev&revision=
+GITCL_PREUPLOAD: http://src.chromium.org/viewvc/trunk/tools/depot_tools/git-cl-upload-hook?revision=HEAD&root=chrome
+GITCL_PREDCOMMIT: http://src.chromium.org/viewvc/trunk/tools/depot_tools/git-cl-upload-hook?revision=HEAD&root=chrome
diff --git a/chromium/media/cdm/ppapi/api/content_decryption_module.h b/chromium/media/cdm/ppapi/api/content_decryption_module.h
index dd518569202..07f5bbd62df 100644
--- a/chromium/media/cdm/ppapi/api/content_decryption_module.h
+++ b/chromium/media/cdm/ppapi/api/content_decryption_module.h
@@ -37,7 +37,15 @@ typedef __int64 int64_t;
// The version number must be rolled when the exported functions are updated!
// If the CDM and the adapter use different versions of these functions, the
// adapter will fail to load or crash!
-#define INITIALIZE_CDM_MODULE InitializeCdmModule_4
+#define CDM_MODULE_VERSION 4
+
+// Build the versioned entrypoint name.
+// The extra macros are necessary to expand version to an actual value.
+#define INITIALIZE_CDM_MODULE \
+ BUILD_ENTRYPOINT(InitializeCdmModule, CDM_MODULE_VERSION)
+#define BUILD_ENTRYPOINT(name, version) \
+ BUILD_ENTRYPOINT_NO_EXPANSION(name, version)
+#define BUILD_ENTRYPOINT_NO_EXPANSION(name, version) name##_##version
extern "C" {
CDM_EXPORT void INITIALIZE_CDM_MODULE();
@@ -59,7 +67,7 @@ typedef void* (*GetCdmHostFunc)(int host_interface_version, void* user_data);
// object.
CDM_EXPORT void* CreateCdmInstance(
int cdm_interface_version,
- const char* key_system, int key_system_size,
+ const char* key_system, uint32_t key_system_size,
GetCdmHostFunc get_cdm_host_func, void* user_data);
CDM_EXPORT const char* GetCdmVersion();
@@ -67,7 +75,14 @@ CDM_EXPORT const char* GetCdmVersion();
namespace cdm {
-class AudioFrames;
+class AudioFrames_1;
+class AudioFrames_2;
+typedef AudioFrames_2 AudioFrames;
+
+class Host_1;
+class Host_2;
+class Host_3;
+
class DecryptedBlock;
class VideoFrame;
@@ -77,18 +92,18 @@ enum Status {
kNoKey, // The required decryption key is not available.
kSessionError, // Session management error.
kDecryptError, // Decryption failed.
- kDecodeError // Error decoding audio or video.
+ kDecodeError, // Error decoding audio or video.
+ kDeferredInitialization // Decoder is not ready for initialization.
};
// This must be consistent with MediaKeyError defined in the spec:
-// http://goo.gl/rbdnR
+// https://dvcs.w3.org/hg/html-media/raw-file/default/encrypted-media/encrypted-media.html#error-codes
+// The error codes are in the process of changing. For now, support the minimum
+// required set with backwards compatible values.
enum MediaKeyError {
kUnknownError = 1,
- kClientError,
- kServiceError,
- kOutputError,
- kHardwareChangeError,
- kDomainError
+ kClientError = 2,
+ kOutputError = 4
};
// An input buffer can be split into several continuous subsamples.
@@ -114,11 +129,11 @@ enum MediaKeyError {
//
// TODO(xhwang): Add checks to make sure these structs have fixed layout.
struct SubsampleEntry {
- SubsampleEntry(int32_t clear_bytes, int32_t cipher_bytes)
+ SubsampleEntry(uint32_t clear_bytes, uint32_t cipher_bytes)
: clear_bytes(clear_bytes), cipher_bytes(cipher_bytes) {}
- int32_t clear_bytes;
- int32_t cipher_bytes;
+ uint32_t clear_bytes;
+ uint32_t cipher_bytes;
};
// Represents an input buffer to be decrypted (and possibly decoded). It does
@@ -137,18 +152,18 @@ struct InputBuffer {
timestamp(0) {}
const uint8_t* data; // Pointer to the beginning of the input data.
- int32_t data_size; // Size (in bytes) of |data|.
+ uint32_t data_size; // Size (in bytes) of |data|.
- int32_t data_offset; // Number of bytes to be discarded before decryption.
+ uint32_t data_offset; // Number of bytes to be discarded before decryption.
const uint8_t* key_id; // Key ID to identify the decryption key.
- int32_t key_id_size; // Size (in bytes) of |key_id|.
+ uint32_t key_id_size; // Size (in bytes) of |key_id|.
const uint8_t* iv; // Initialization vector.
- int32_t iv_size; // Size (in bytes) of |iv|.
+ uint32_t iv_size; // Size (in bytes) of |iv|.
const struct SubsampleEntry* subsamples;
- int32_t num_subsamples; // Number of subsamples in |subsamples|.
+ uint32_t num_subsamples; // Number of subsamples in |subsamples|.
int64_t timestamp; // Presentation timestamp in microseconds.
};
@@ -176,12 +191,23 @@ struct AudioDecoderConfig {
// Optional byte data required to initialize audio decoders, such as the
// vorbis setup header.
uint8_t* extra_data;
- int32_t extra_data_size;
+ uint32_t extra_data_size;
+};
+
+// Supported sample formats for AudioFrames.
+enum AudioFormat {
+ kUnknownAudioFormat = 0, // Unknown format value. Used for error reporting.
+ kAudioFormatU8, // Interleaved unsigned 8-bit w/ bias of 128.
+ kAudioFormatS16, // Interleaved signed 16-bit.
+ kAudioFormatS32, // Interleaved signed 32-bit.
+ kAudioFormatF32, // Interleaved float 32-bit.
+ kAudioFormatPlanarS16, // Signed 16-bit planar.
+ kAudioFormatPlanarF32, // Float 32-bit planar.
};
// Surface formats based on FOURCC labels, see: http://www.fourcc.org/yuv.php
enum VideoFormat {
- kUnknownVideoFormat = 0, // Unknown format value. Used for error reporting.
+ kUnknownVideoFormat = 0, // Unknown format value. Used for error reporting.
kYv12, // 12bpp YVU planar 1x1 Y, 2x2 VU samples.
kI420 // 12bpp YVU planar 1x1 Y, 2x2 UV samples.
};
@@ -231,7 +257,7 @@ struct VideoDecoderConfig {
// Optional byte data required to initialize video decoders, such as H.264
// AAVC data.
uint8_t* extra_data;
- int32_t extra_data_size;
+ uint32_t extra_data_size;
};
enum StreamType {
@@ -239,6 +265,47 @@ enum StreamType {
kStreamTypeVideo = 1
};
+// Structure provided to ContentDecryptionModule::OnPlatformChallengeResponse()
+// after a platform challenge was initiated via Host::SendPlatformChallenge().
+// All values will be NULL / zero in the event of a challenge failure.
+struct PlatformChallengeResponse {
+ // |challenge| provided during Host::SendPlatformChallenge() combined with
+ // nonce data and signed with the platform's private key.
+ const uint8_t* signed_data;
+ uint32_t signed_data_length;
+
+ // RSASSA-PKCS1-v1_5-SHA256 signature of the |signed_data| block.
+ const uint8_t* signed_data_signature;
+ uint32_t signed_data_signature_length;
+
+ // X.509 device specific certificate for the |service_id| requested.
+ const uint8_t* platform_key_certificate;
+ uint32_t platform_key_certificate_length;
+};
+
+// Supported output protection methods for use with EnableOutputProtection() and
+// returned by OnQueryOutputProtectionStatus().
+enum OutputProtectionMethods {
+ kProtectionNone = 0,
+ kProtectionHDCP = 1 << 0
+};
+
+// Connected output link types returned by OnQueryOutputProtectionStatus().
+enum OutputLinkTypes {
+ kLinkTypeNone = 0,
+ kLinkTypeUnknown = 1 << 0,
+ kLinkTypeInternal = 1 << 1,
+ kLinkTypeVGA = 1 << 2,
+ kLinkTypeHDMI = 1 << 3,
+ kLinkTypeDVI = 1 << 4,
+ kLinkTypeDisplayPort = 1 << 5,
+ kLinkTypeNetwork = 1 << 6
+};
+
+//
+// WARNING: Deprecated. Will be removed in the near future. CDMs should
+// implement ContentDecryptionModule_2 instead.
+
// ContentDecryptionModule interface that all CDMs need to implement.
// The interface is versioned for backward compatibility.
// Note: ContentDecryptionModule implementations must use the allocator
@@ -247,6 +314,9 @@ enum StreamType {
// when a Buffer is created that will never be returned to the caller.
class ContentDecryptionModule_1 {
public:
+ static const int kVersion = 1;
+ typedef Host_1 Host;
+
// Generates a |key_request| given |type| and |init_data|.
//
// Returns kSuccess if the key request was successfully generated, in which
@@ -254,16 +324,16 @@ class ContentDecryptionModule_1 {
// Returns kSessionError if any error happened, in which case the CDM must
// send a key error by calling Host::SendKeyError().
virtual Status GenerateKeyRequest(
- const char* type, int type_size,
- const uint8_t* init_data, int init_data_size) = 0;
+ const char* type, uint32_t type_size,
+ const uint8_t* init_data, uint32_t init_data_size) = 0;
// Adds the |key| to the CDM to be associated with |key_id|.
//
// Returns kSuccess if the key was successfully added, kSessionError
// otherwise.
- virtual Status AddKey(const char* session_id, int session_id_size,
- const uint8_t* key, int key_size,
- const uint8_t* key_id, int key_id_size) = 0;
+ virtual Status AddKey(const char* session_id, uint32_t session_id_size,
+ const uint8_t* key, uint32_t key_size,
+ const uint8_t* key_id, uint32_t key_id_size) = 0;
// Cancels any pending key request made to the CDM for |session_id|.
//
@@ -271,7 +341,7 @@ class ContentDecryptionModule_1 {
// successfully canceled or there was no key request to be canceled,
// kSessionError otherwise.
virtual Status CancelKeyRequest(
- const char* session_id, int session_id_size) = 0;
+ const char* session_id, uint32_t session_id_size) = 0;
// Performs scheduled operation with |context| when the timer fires.
virtual void TimerExpired(void* context) = 0;
@@ -296,9 +366,6 @@ class ContentDecryptionModule_1 {
// audio decoder is successfully initialized.
// Returns kSessionError if |audio_decoder_config| is not supported. The CDM
// may still be able to do Decrypt().
- //
- // TODO(xhwang): Add stream ID here and in the following audio decoder
- // functions when we need to support multiple audio streams in one CDM.
virtual Status InitializeAudioDecoder(
const AudioDecoderConfig& audio_decoder_config) = 0;
@@ -309,9 +376,6 @@ class ContentDecryptionModule_1 {
// video decoder is successfully initialized.
// Returns kSessionError if |video_decoder_config| is not supported. The CDM
// may still be able to do Decrypt().
- //
- // TODO(xhwang): Add stream ID here and in the following video decoder
- // functions when we need to support multiple video streams in one CDM.
virtual Status InitializeVideoDecoder(
const VideoDecoderConfig& video_decoder_config) = 0;
@@ -361,7 +425,7 @@ class ContentDecryptionModule_1 {
// If the return value is not kSuccess, |audio_frames| should be ignored by
// the caller.
virtual Status DecryptAndDecodeSamples(const InputBuffer& encrypted_buffer,
- AudioFrames* audio_frames) = 0;
+ AudioFrames_1* audio_frames) = 0;
// Destroys the object in the same context as it was created.
virtual void Destroy() = 0;
@@ -371,10 +435,293 @@ class ContentDecryptionModule_1 {
virtual ~ContentDecryptionModule_1() {}
};
-const int kCdmInterfaceVersion_1 = 1;
+// ContentDecryptionModule interface that all CDMs need to implement.
+// The interface is versioned for backward compatibility.
+// Note: ContentDecryptionModule implementations must use the allocator
+// provided in CreateCdmInstance() to allocate any Buffer that needs to
+// be passed back to the caller. Implementations must call Buffer::Destroy()
+// when a Buffer is created that will never be returned to the caller.
+class ContentDecryptionModule_2 {
+ public:
+ static const int kVersion = 2;
+ typedef Host_2 Host;
+
+ // Generates a |key_request| given |type| and |init_data|.
+ //
+ // Returns kSuccess if the key request was successfully generated, in which
+ // case the CDM must send the key message by calling Host::SendKeyMessage().
+ // Returns kSessionError if any error happened, in which case the CDM must
+ // send a key error by calling Host::SendKeyError().
+ virtual Status GenerateKeyRequest(
+ const char* type, uint32_t type_size,
+ const uint8_t* init_data, uint32_t init_data_size) = 0;
+
+ // Adds the |key| to the CDM to be associated with |key_id|.
+ //
+ // Returns kSuccess if the key was successfully added, kSessionError
+ // otherwise.
+ virtual Status AddKey(const char* session_id, uint32_t session_id_size,
+ const uint8_t* key, uint32_t key_size,
+ const uint8_t* key_id, uint32_t key_id_size) = 0;
+
+ // Cancels any pending key request made to the CDM for |session_id|.
+ //
+ // Returns kSuccess if all pending key requests for |session_id| were
+ // successfully canceled or there was no key request to be canceled,
+ // kSessionError otherwise.
+ virtual Status CancelKeyRequest(
+ const char* session_id, uint32_t session_id_size) = 0;
+
+ // Performs scheduled operation with |context| when the timer fires.
+ virtual void TimerExpired(void* context) = 0;
+
+ // Decrypts the |encrypted_buffer|.
+ //
+ // Returns kSuccess if decryption succeeded, in which case the callee
+ // should have filled the |decrypted_buffer| and passed the ownership of
+ // |data| in |decrypted_buffer| to the caller.
+ // Returns kNoKey if the CDM did not have the necessary decryption key
+ // to decrypt.
+ // Returns kDecryptError if any other error happened.
+ // If the return value is not kSuccess, |decrypted_buffer| should be ignored
+ // by the caller.
+ virtual Status Decrypt(const InputBuffer& encrypted_buffer,
+ DecryptedBlock* decrypted_buffer) = 0;
+
+ // Initializes the CDM audio decoder with |audio_decoder_config|. This
+ // function must be called before DecryptAndDecodeSamples() is called.
+ //
+ // Returns kSuccess if the |audio_decoder_config| is supported and the CDM
+ // audio decoder is successfully initialized.
+ // Returns kSessionError if |audio_decoder_config| is not supported. The CDM
+ // may still be able to do Decrypt().
+ // Returns kDeferredInitialization if the CDM is not ready to initialize the
+ // decoder at this time. Must call Host::OnDeferredInitializationDone() once
+ // initialization is complete.
+ virtual Status InitializeAudioDecoder(
+ const AudioDecoderConfig& audio_decoder_config) = 0;
+
+ // Initializes the CDM video decoder with |video_decoder_config|. This
+ // function must be called before DecryptAndDecodeFrame() is called.
+ //
+ // Returns kSuccess if the |video_decoder_config| is supported and the CDM
+ // video decoder is successfully initialized.
+ // Returns kSessionError if |video_decoder_config| is not supported. The CDM
+ // may still be able to do Decrypt().
+ // Returns kDeferredInitialization if the CDM is not ready to initialize the
+ // decoder at this time. Must call Host::OnDeferredInitializationDone() once
+ // initialization is complete.
+ virtual Status InitializeVideoDecoder(
+ const VideoDecoderConfig& video_decoder_config) = 0;
+
+ // De-initializes the CDM decoder and sets it to an uninitialized state. The
+ // caller can initialize the decoder again after this call to re-initialize
+ // it. This can be used to reconfigure the decoder if the configuration
+ // changes.
+ virtual void DeinitializeDecoder(StreamType decoder_type) = 0;
+
+ // Resets the CDM decoder to an initialized clean state. All internal buffers
+ // MUST be flushed.
+ virtual void ResetDecoder(StreamType decoder_type) = 0;
+
+ // Decrypts the |encrypted_buffer| and decodes the decrypted buffer into a
+ // |video_frame|. Upon end-of-stream, the caller should call this function
+ // repeatedly with empty |encrypted_buffer| (|data| == NULL) until only empty
+ // |video_frame| (|format| == kEmptyVideoFrame) is produced.
+ //
+ // Returns kSuccess if decryption and decoding both succeeded, in which case
+ // the callee will have filled the |video_frame| and passed the ownership of
+ // |frame_buffer| in |video_frame| to the caller.
+ // Returns kNoKey if the CDM did not have the necessary decryption key
+ // to decrypt.
+ // Returns kNeedMoreData if more data was needed by the decoder to generate
+ // a decoded frame (e.g. during initialization and end-of-stream).
+ // Returns kDecryptError if any decryption error happened.
+ // Returns kDecodeError if any decoding error happened.
+ // If the return value is not kSuccess, |video_frame| should be ignored by
+ // the caller.
+ virtual Status DecryptAndDecodeFrame(const InputBuffer& encrypted_buffer,
+ VideoFrame* video_frame) = 0;
+
+ // Decrypts the |encrypted_buffer| and decodes the decrypted buffer into
+ // |audio_frames|. Upon end-of-stream, the caller should call this function
+ // repeatedly with empty |encrypted_buffer| (|data| == NULL) until only empty
+ // |audio_frames| is produced.
+ //
+ // Returns kSuccess if decryption and decoding both succeeded, in which case
+ // the callee will have filled |audio_frames| and passed the ownership of
+ // |data| in |audio_frames| to the caller.
+ // Returns kNoKey if the CDM did not have the necessary decryption key
+ // to decrypt.
+ // Returns kNeedMoreData if more data was needed by the decoder to generate
+ // audio samples (e.g. during initialization and end-of-stream).
+ // Returns kDecryptError if any decryption error happened.
+ // Returns kDecodeError if any decoding error happened.
+ // If the return value is not kSuccess, |audio_frames| should be ignored by
+ // the caller.
+ virtual Status DecryptAndDecodeSamples(const InputBuffer& encrypted_buffer,
+ AudioFrames* audio_frames) = 0;
+
+ // Called by the host after a platform challenge was initiated via
+ // Host::SendPlatformChallenge().
+ virtual void OnPlatformChallengeResponse(
+ const PlatformChallengeResponse& response) = 0;
+
+ // Called by the host after a call to Host::QueryOutputProtectionStatus(). The
+ // |link_mask| is a bit mask of OutputLinkTypes and |output_protection_mask|
+ // is a bit mask of OutputProtectionMethods.
+ virtual void OnQueryOutputProtectionStatus(
+ uint32_t link_mask, uint32_t output_protection_mask) = 0;
+
+ // Destroys the object in the same context as it was created.
+ virtual void Destroy() = 0;
+
+ protected:
+ ContentDecryptionModule_2() {}
+ virtual ~ContentDecryptionModule_2() {}
+};
+
+// ContentDecryptionModule interface that all CDMs need to implement.
+// The interface is versioned for backward compatibility.
+// Note: ContentDecryptionModule implementations must use the allocator
+// provided in CreateCdmInstance() to allocate any Buffer that needs to
+// be passed back to the caller. Implementations must call Buffer::Destroy()
+// when a Buffer is created that will never be returned to the caller.
+class ContentDecryptionModule_3 {
+ public:
+ static const int kVersion = 3;
+ typedef Host_3 Host;
+
+ // CreateSession(), UpdateSession(), and ReleaseSession() get passed a
+ // |session_id| for a MediaKeySession object. It must be used in the reply via
+ // Host methods (e.g. Host::OnSessionMessage()).
+ // Note: |session_id| is different from MediaKeySession's sessionId attribute,
+ // which is referred to as |web_session_id| in this file.
+
+ // Creates a session given |type| and |init_data|.
+ virtual void CreateSession(
+ uint32_t session_id,
+ const char* type, uint32_t type_size,
+ const uint8_t* init_data, uint32_t init_data_size) = 0;
-typedef ContentDecryptionModule_1 ContentDecryptionModule;
-const int kCdmInterfaceVersion = kCdmInterfaceVersion_1;
+ // Updates the session with |response|.
+ virtual void UpdateSession(
+ uint32_t session_id,
+ const uint8_t* response, uint32_t response_size) = 0;
+
+ // Releases the resources for the session.
+ virtual void ReleaseSession(uint32_t session_id) = 0;
+
+ // Performs scheduled operation with |context| when the timer fires.
+ virtual void TimerExpired(void* context) = 0;
+
+ // Decrypts the |encrypted_buffer|.
+ //
+ // Returns kSuccess if decryption succeeded, in which case the callee
+ // should have filled the |decrypted_buffer| and passed the ownership of
+ // |data| in |decrypted_buffer| to the caller.
+ // Returns kNoKey if the CDM did not have the necessary decryption key
+ // to decrypt.
+ // Returns kDecryptError if any other error happened.
+ // If the return value is not kSuccess, |decrypted_buffer| should be ignored
+ // by the caller.
+ virtual Status Decrypt(const InputBuffer& encrypted_buffer,
+ DecryptedBlock* decrypted_buffer) = 0;
+
+ // Initializes the CDM audio decoder with |audio_decoder_config|. This
+ // function must be called before DecryptAndDecodeSamples() is called.
+ //
+ // Returns kSuccess if the |audio_decoder_config| is supported and the CDM
+ // audio decoder is successfully initialized.
+ // Returns kSessionError if |audio_decoder_config| is not supported. The CDM
+ // may still be able to do Decrypt().
+ // Returns kDeferredInitialization if the CDM is not ready to initialize the
+ // decoder at this time. Must call Host::OnDeferredInitializationDone() once
+ // initialization is complete.
+ virtual Status InitializeAudioDecoder(
+ const AudioDecoderConfig& audio_decoder_config) = 0;
+
+ // Initializes the CDM video decoder with |video_decoder_config|. This
+ // function must be called before DecryptAndDecodeFrame() is called.
+ //
+ // Returns kSuccess if the |video_decoder_config| is supported and the CDM
+ // video decoder is successfully initialized.
+ // Returns kSessionError if |video_decoder_config| is not supported. The CDM
+ // may still be able to do Decrypt().
+ // Returns kDeferredInitialization if the CDM is not ready to initialize the
+ // decoder at this time. Must call Host::OnDeferredInitializationDone() once
+ // initialization is complete.
+ virtual Status InitializeVideoDecoder(
+ const VideoDecoderConfig& video_decoder_config) = 0;
+
+ // De-initializes the CDM decoder and sets it to an uninitialized state. The
+ // caller can initialize the decoder again after this call to re-initialize
+ // it. This can be used to reconfigure the decoder if the configuration
+ // changes.
+ virtual void DeinitializeDecoder(StreamType decoder_type) = 0;
+
+ // Resets the CDM decoder to an initialized clean state. All internal buffers
+ // MUST be flushed.
+ virtual void ResetDecoder(StreamType decoder_type) = 0;
+
+ // Decrypts the |encrypted_buffer| and decodes the decrypted buffer into a
+ // |video_frame|. Upon end-of-stream, the caller should call this function
+ // repeatedly with empty |encrypted_buffer| (|data| == NULL) until only empty
+ // |video_frame| (|format| == kEmptyVideoFrame) is produced.
+ //
+ // Returns kSuccess if decryption and decoding both succeeded, in which case
+ // the callee will have filled the |video_frame| and passed the ownership of
+ // |frame_buffer| in |video_frame| to the caller.
+ // Returns kNoKey if the CDM did not have the necessary decryption key
+ // to decrypt.
+ // Returns kNeedMoreData if more data was needed by the decoder to generate
+ // a decoded frame (e.g. during initialization and end-of-stream).
+ // Returns kDecryptError if any decryption error happened.
+ // Returns kDecodeError if any decoding error happened.
+ // If the return value is not kSuccess, |video_frame| should be ignored by
+ // the caller.
+ virtual Status DecryptAndDecodeFrame(const InputBuffer& encrypted_buffer,
+ VideoFrame* video_frame) = 0;
+
+ // Decrypts the |encrypted_buffer| and decodes the decrypted buffer into
+ // |audio_frames|. Upon end-of-stream, the caller should call this function
+ // repeatedly with empty |encrypted_buffer| (|data| == NULL) until only empty
+ // |audio_frames| is produced.
+ //
+ // Returns kSuccess if decryption and decoding both succeeded, in which case
+ // the callee will have filled |audio_frames| and passed the ownership of
+ // |data| in |audio_frames| to the caller.
+ // Returns kNoKey if the CDM did not have the necessary decryption key
+ // to decrypt.
+ // Returns kNeedMoreData if more data was needed by the decoder to generate
+ // audio samples (e.g. during initialization and end-of-stream).
+ // Returns kDecryptError if any decryption error happened.
+ // Returns kDecodeError if any decoding error happened.
+ // If the return value is not kSuccess, |audio_frames| should be ignored by
+ // the caller.
+ virtual Status DecryptAndDecodeSamples(const InputBuffer& encrypted_buffer,
+ AudioFrames* audio_frames) = 0;
+
+ // Called by the host after a platform challenge was initiated via
+ // Host::SendPlatformChallenge().
+ virtual void OnPlatformChallengeResponse(
+ const PlatformChallengeResponse& response) = 0;
+
+ // Called by the host after a call to Host::QueryOutputProtectionStatus(). The
+ // |link_mask| is a bit mask of OutputLinkTypes and |output_protection_mask|
+ // is a bit mask of OutputProtectionMethods.
+ virtual void OnQueryOutputProtectionStatus(
+ uint32_t link_mask, uint32_t output_protection_mask) = 0;
+
+ // Destroys the object in the same context as it was created.
+ virtual void Destroy() = 0;
+
+ protected:
+ ContentDecryptionModule_3() {}
+ virtual ~ContentDecryptionModule_3() {}
+};
+
+typedef ContentDecryptionModule_3 ContentDecryptionModule;
// Represents a buffer created by Allocator implementations.
class Buffer {
@@ -382,10 +729,10 @@ class Buffer {
// Destroys the buffer in the same context as it was created.
virtual void Destroy() = 0;
- virtual int32_t Capacity() const = 0;
+ virtual uint32_t Capacity() const = 0;
virtual uint8_t* Data() = 0;
- virtual void SetSize(int32_t size) = 0;
- virtual int32_t Size() const = 0;
+ virtual void SetSize(uint32_t size) = 0;
+ virtual uint32_t Size() const = 0;
protected:
Buffer() {}
@@ -401,11 +748,13 @@ class Buffer {
// HostFactory object to request a Host interface of a particular version.
class Host_1 {
public:
+ static const int kVersion = 1;
+
// Returns a Buffer* containing non-zero members upon success, or NULL on
// failure. The caller owns the Buffer* after this call. The buffer is not
// guaranteed to be zero initialized. The capacity of the allocated Buffer
// is guaranteed to be not less than |capacity|.
- virtual Buffer* Allocate(int32_t capacity) = 0;
+ virtual Buffer* Allocate(uint32_t capacity) = 0;
// Requests the host to call ContentDecryptionModule::TimerFired() |delay_ms|
// from now with |context|.
@@ -417,14 +766,14 @@ class Host_1 {
// Sends a keymessage event to the application.
// Length parameters should not include null termination.
virtual void SendKeyMessage(
- const char* session_id, int32_t session_id_length,
- const char* message, int32_t message_length,
- const char* default_url, int32_t default_url_length) = 0;
+ const char* session_id, uint32_t session_id_length,
+ const char* message, uint32_t message_length,
+ const char* default_url, uint32_t default_url_length) = 0;
// Sends a keyerror event to the application.
// |session_id_length| should not include null termination.
virtual void SendKeyError(const char* session_id,
- int32_t session_id_length,
+ uint32_t session_id_length,
MediaKeyError error_code,
uint32_t system_code) = 0;
@@ -438,10 +787,145 @@ class Host_1 {
virtual ~Host_1() {}
};
-const int kHostInterfaceVersion_1 = 1;
+class Host_2 {
+ public:
+ static const int kVersion = 2;
+
+ // Returns a Buffer* containing non-zero members upon success, or NULL on
+ // failure. The caller owns the Buffer* after this call. The buffer is not
+ // guaranteed to be zero initialized. The capacity of the allocated Buffer
+ // is guaranteed to be not less than |capacity|.
+ virtual Buffer* Allocate(uint32_t capacity) = 0;
+
+ // Requests the host to call ContentDecryptionModule::TimerFired() |delay_ms|
+ // from now with |context|.
+ virtual void SetTimer(int64_t delay_ms, void* context) = 0;
+
+ // Returns the current epoch wall time in seconds.
+ virtual double GetCurrentWallTimeInSeconds() = 0;
+
+ // Sends a keymessage event to the application.
+ // Length parameters should not include null termination.
+ virtual void SendKeyMessage(
+ const char* session_id, uint32_t session_id_length,
+ const char* message, uint32_t message_length,
+ const char* default_url, uint32_t default_url_length) = 0;
+
+ // Sends a keyerror event to the application.
+ // |session_id_length| should not include null termination.
+ virtual void SendKeyError(const char* session_id,
+ uint32_t session_id_length,
+ MediaKeyError error_code,
+ uint32_t system_code) = 0;
+
+ // Get private data from the host. This function is limited to internal use.
+ virtual void GetPrivateData(int32_t* instance,
+ Host_1::GetPrivateInterface* get_interface) = 0;
+
+ // Sends a platform challenge for the given |service_id|. |challenge| is at
+ // most 256 bits of data to be signed. Once the challenge has been completed,
+ // the host will call ContentDecryptionModule::OnPlatformChallengeResponse()
+ // with the signed challenge response and platform certificate. Length
+ // parameters should not include null termination.
+ virtual void SendPlatformChallenge(
+ const char* service_id, uint32_t service_id_length,
+ const char* challenge, uint32_t challenge_length) = 0;
+
+ // Attempts to enable output protection (e.g. HDCP) on the display link. The
+ // |desired_protection_mask| is a bit mask of OutputProtectionMethods. No
+ // status callback is issued, the CDM must call QueryOutputProtectionStatus()
+ // periodically to ensure the desired protections are applied.
+ virtual void EnableOutputProtection(uint32_t desired_protection_mask) = 0;
+
+ // Requests the current output protection status. Once the host has the status
+ // it will call ContentDecryptionModule::OnQueryOutputProtectionStatus().
+ virtual void QueryOutputProtectionStatus() = 0;
+
+ // Must be called by the CDM if it returned kDeferredInitialization during
+ // InitializeAudioDecoder() or InitializeVideoDecoder().
+ virtual void OnDeferredInitializationDone(StreamType stream_type,
+ Status decoder_status) = 0;
+
+ protected:
+ Host_2() {}
+ virtual ~Host_2() {}
+};
+
+class Host_3 {
+ public:
+ static const int kVersion = 3;
-typedef Host_1 Host;
-const int kHostInterfaceVersion = kHostInterfaceVersion_1;
+ // Returns a Buffer* containing non-zero members upon success, or NULL on
+ // failure. The caller owns the Buffer* after this call. The buffer is not
+ // guaranteed to be zero initialized. The capacity of the allocated Buffer
+ // is guaranteed to be not less than |capacity|.
+ virtual Buffer* Allocate(uint32_t capacity) = 0;
+
+ // Requests the host to call ContentDecryptionModule::TimerFired() |delay_ms|
+ // from now with |context|.
+ virtual void SetTimer(int64_t delay_ms, void* context) = 0;
+
+ // Returns the current epoch wall time in seconds.
+ virtual double GetCurrentWallTimeInSeconds() = 0;
+
+ // Called by the CDM when a session is created and the value for the
+ // MediaKeySession's sessionId attribute is available (|web_session_id|).
+ // This must be called before OnSessionMessage() or OnSessionReady() is called
+ // for |session_id|. |web_session_id_length| should not include null
+ // termination.
+ virtual void OnSessionCreated(
+ uint32_t session_id,
+ const char* web_session_id, uint32_t web_session_id_length) = 0;
+
+ // Called by the CDM when it has a message for session |session_id|.
+ // Length parameters should not include null termination.
+ virtual void OnSessionMessage(
+ uint32_t session_id,
+ const char* message, uint32_t message_length,
+ const char* destination_url, uint32_t destination_url_length) = 0;
+
+ // Called by the CDM when session |session_id| is ready.
+ virtual void OnSessionReady(uint32_t session_id) = 0;
+
+ // Called by the CDM when session |session_id| is closed.
+ virtual void OnSessionClosed(uint32_t session_id) = 0;
+
+ // Called by the CDM when an error occurs in session |session_id|.
+ virtual void OnSessionError(uint32_t session_id,
+ MediaKeyError error_code,
+ uint32_t system_code) = 0;
+
+ // The following are optional methods that may not be implemented on all
+ // platforms.
+
+ // Sends a platform challenge for the given |service_id|. |challenge| is at
+ // most 256 bits of data to be signed. Once the challenge has been completed,
+ // the host will call ContentDecryptionModule::OnPlatformChallengeResponse()
+ // with the signed challenge response and platform certificate. Length
+ // parameters should not include null termination.
+ virtual void SendPlatformChallenge(
+ const char* service_id, uint32_t service_id_length,
+ const char* challenge, uint32_t challenge_length) = 0;
+
+ // Attempts to enable output protection (e.g. HDCP) on the display link. The
+ // |desired_protection_mask| is a bit mask of OutputProtectionMethods. No
+ // status callback is issued, the CDM must call QueryOutputProtectionStatus()
+ // periodically to ensure the desired protections are applied.
+ virtual void EnableOutputProtection(uint32_t desired_protection_mask) = 0;
+
+ // Requests the current output protection status. Once the host has the status
+ // it will call ContentDecryptionModule::OnQueryOutputProtectionStatus().
+ virtual void QueryOutputProtectionStatus() = 0;
+
+ // Must be called by the CDM if it returned kDeferredInitialization during
+ // InitializeAudioDecoder() or InitializeVideoDecoder().
+ virtual void OnDeferredInitializationDone(StreamType stream_type,
+ Status decoder_status) = 0;
+
+ protected:
+ Host_3() {}
+ virtual ~Host_3() {}
+};
// Represents a decrypted block that has not been decoded.
class DecryptedBlock {
@@ -477,11 +961,11 @@ class VideoFrame {
virtual void SetFrameBuffer(Buffer* frame_buffer) = 0;
virtual Buffer* FrameBuffer() = 0;
- virtual void SetPlaneOffset(VideoPlane plane, int32_t offset) = 0;
- virtual int32_t PlaneOffset(VideoPlane plane) = 0;
+ virtual void SetPlaneOffset(VideoPlane plane, uint32_t offset) = 0;
+ virtual uint32_t PlaneOffset(VideoPlane plane) = 0;
- virtual void SetStride(VideoPlane plane, int32_t stride) = 0;
- virtual int32_t Stride(VideoPlane plane) = 0;
+ virtual void SetStride(VideoPlane plane, uint32_t stride) = 0;
+ virtual uint32_t Stride(VideoPlane plane) = 0;
virtual void SetTimestamp(int64_t timestamp) = 0;
virtual int64_t Timestamp() const = 0;
@@ -491,6 +975,10 @@ class VideoFrame {
virtual ~VideoFrame() {}
};
+//
+// WARNING: Deprecated. Will be removed in the near future. CDMs should be
+// implementing ContentDecryptionModule_2 instead which uses AudioFrames_2.
+//
// Represents decrypted and decoded audio frames. AudioFrames can contain
// multiple audio output buffers, which are serialized into this format:
//
@@ -502,14 +990,31 @@ class VideoFrame {
//
// |<----------------- AudioFrames ------------------>|
// | audio buffer 0 | audio buffer 1 | audio buffer 2 |
-class AudioFrames {
+class AudioFrames_1 {
public:
virtual void SetFrameBuffer(Buffer* buffer) = 0;
virtual Buffer* FrameBuffer() = 0;
protected:
- AudioFrames() {}
- virtual ~AudioFrames() {}
+ AudioFrames_1() {}
+ virtual ~AudioFrames_1() {}
+};
+
+// Same as AudioFrames except the format of the data may be specified to avoid
+// unnecessary conversion steps. Planar data should be stored end to end; e.g.,
+// |ch1 sample1||ch1 sample2|....|ch1 sample_last||ch2 sample1|...
+class AudioFrames_2 {
+ public:
+ virtual void SetFrameBuffer(Buffer* buffer) = 0;
+ virtual Buffer* FrameBuffer() = 0;
+
+ // Layout of the audio data. Defaults to kAudioFormatS16.
+ virtual void SetFormat(AudioFormat format) = 0;
+ virtual AudioFormat Format() const = 0;
+
+ protected:
+ AudioFrames_2() {}
+ virtual ~AudioFrames_2() {}
};
} // namespace cdm
diff --git a/chromium/media/cdm/ppapi/cdm_adapter.cc b/chromium/media/cdm/ppapi/cdm_adapter.cc
new file mode 100644
index 00000000000..d92890328a8
--- /dev/null
+++ b/chromium/media/cdm/ppapi/cdm_adapter.cc
@@ -0,0 +1,1041 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cdm/ppapi/cdm_adapter.h"
+
+#include "media/cdm/ppapi/cdm_helpers.h"
+#include "media/cdm/ppapi/cdm_logging.h"
+#include "media/cdm/ppapi/supported_cdm_versions.h"
+#include "ppapi/c/ppb_console.h"
+
+#if defined(CHECK_DOCUMENT_URL)
+#include "ppapi/cpp/dev/url_util_dev.h"
+#include "ppapi/cpp/instance_handle.h"
+#endif // defined(CHECK_DOCUMENT_URL)
+
+namespace {
+
+#if !defined(NDEBUG)
+ #define DLOG_TO_CONSOLE(message) LogToConsole(message);
+#else
+ #define DLOG_TO_CONSOLE(message) (void)(message);
+#endif
+
+bool IsMainThread() {
+ return pp::Module::Get()->core()->IsMainThread();
+}
+
+// Posts a task to run |cb| on the main thread. The task is posted even if the
+// current thread is the main thread.
+void PostOnMain(pp::CompletionCallback cb) {
+ pp::Module::Get()->core()->CallOnMainThread(0, cb, PP_OK);
+}
+
+// Ensures |cb| is called on the main thread, either because the current thread
+// is the main thread or by posting it to the main thread.
+void CallOnMain(pp::CompletionCallback cb) {
+ // TODO(tomfinegan): This is only necessary because PPAPI doesn't allow calls
+ // off the main thread yet. Remove this once the change lands.
+ if (IsMainThread())
+ cb.Run(PP_OK);
+ else
+ PostOnMain(cb);
+}
+
+// Configures a cdm::InputBuffer. |subsamples| must exist as long as
+// |input_buffer| is in use.
+void ConfigureInputBuffer(
+ const pp::Buffer_Dev& encrypted_buffer,
+ const PP_EncryptedBlockInfo& encrypted_block_info,
+ std::vector<cdm::SubsampleEntry>* subsamples,
+ cdm::InputBuffer* input_buffer) {
+ PP_DCHECK(subsamples);
+ PP_DCHECK(!encrypted_buffer.is_null());
+
+ input_buffer->data = static_cast<uint8_t*>(encrypted_buffer.data());
+ input_buffer->data_size = encrypted_block_info.data_size;
+ PP_DCHECK(encrypted_buffer.size() >= input_buffer->data_size);
+ input_buffer->data_offset = encrypted_block_info.data_offset;
+
+ PP_DCHECK(encrypted_block_info.key_id_size <=
+ arraysize(encrypted_block_info.key_id));
+ input_buffer->key_id_size = encrypted_block_info.key_id_size;
+ input_buffer->key_id = input_buffer->key_id_size > 0 ?
+ encrypted_block_info.key_id : NULL;
+
+ PP_DCHECK(encrypted_block_info.iv_size <= arraysize(encrypted_block_info.iv));
+ input_buffer->iv_size = encrypted_block_info.iv_size;
+ input_buffer->iv = encrypted_block_info.iv_size > 0 ?
+ encrypted_block_info.iv : NULL;
+
+ input_buffer->num_subsamples = encrypted_block_info.num_subsamples;
+ if (encrypted_block_info.num_subsamples > 0) {
+ subsamples->reserve(encrypted_block_info.num_subsamples);
+
+ for (uint32_t i = 0; i < encrypted_block_info.num_subsamples; ++i) {
+ subsamples->push_back(cdm::SubsampleEntry(
+ encrypted_block_info.subsamples[i].clear_bytes,
+ encrypted_block_info.subsamples[i].cipher_bytes));
+ }
+
+ input_buffer->subsamples = &(*subsamples)[0];
+ }
+
+ input_buffer->timestamp = encrypted_block_info.tracking_info.timestamp;
+}
+
+PP_DecryptResult CdmStatusToPpDecryptResult(cdm::Status status) {
+ switch (status) {
+ case cdm::kSuccess:
+ return PP_DECRYPTRESULT_SUCCESS;
+ case cdm::kNoKey:
+ return PP_DECRYPTRESULT_DECRYPT_NOKEY;
+ case cdm::kNeedMoreData:
+ return PP_DECRYPTRESULT_NEEDMOREDATA;
+ case cdm::kDecryptError:
+ return PP_DECRYPTRESULT_DECRYPT_ERROR;
+ case cdm::kDecodeError:
+ return PP_DECRYPTRESULT_DECODE_ERROR;
+ default:
+ PP_NOTREACHED();
+ return PP_DECRYPTRESULT_DECODE_ERROR;
+ }
+}
+
+PP_DecryptedFrameFormat CdmVideoFormatToPpDecryptedFrameFormat(
+ cdm::VideoFormat format) {
+ switch (format) {
+ case cdm::kYv12:
+ return PP_DECRYPTEDFRAMEFORMAT_YV12;
+ case cdm::kI420:
+ return PP_DECRYPTEDFRAMEFORMAT_I420;
+ default:
+ return PP_DECRYPTEDFRAMEFORMAT_UNKNOWN;
+ }
+}
+
+PP_DecryptedSampleFormat CdmAudioFormatToPpDecryptedSampleFormat(
+ cdm::AudioFormat format) {
+ switch (format) {
+ case cdm::kAudioFormatU8:
+ return PP_DECRYPTEDSAMPLEFORMAT_U8;
+ case cdm::kAudioFormatS16:
+ return PP_DECRYPTEDSAMPLEFORMAT_S16;
+ case cdm::kAudioFormatS32:
+ return PP_DECRYPTEDSAMPLEFORMAT_S32;
+ case cdm::kAudioFormatF32:
+ return PP_DECRYPTEDSAMPLEFORMAT_F32;
+ case cdm::kAudioFormatPlanarS16:
+ return PP_DECRYPTEDSAMPLEFORMAT_PLANAR_S16;
+ case cdm::kAudioFormatPlanarF32:
+ return PP_DECRYPTEDSAMPLEFORMAT_PLANAR_F32;
+ default:
+ return PP_DECRYPTEDSAMPLEFORMAT_UNKNOWN;
+ }
+}
+
+cdm::AudioDecoderConfig::AudioCodec PpAudioCodecToCdmAudioCodec(
+ PP_AudioCodec codec) {
+ switch (codec) {
+ case PP_AUDIOCODEC_VORBIS:
+ return cdm::AudioDecoderConfig::kCodecVorbis;
+ case PP_AUDIOCODEC_AAC:
+ return cdm::AudioDecoderConfig::kCodecAac;
+ default:
+ return cdm::AudioDecoderConfig::kUnknownAudioCodec;
+ }
+}
+
+cdm::VideoDecoderConfig::VideoCodec PpVideoCodecToCdmVideoCodec(
+ PP_VideoCodec codec) {
+ switch (codec) {
+ case PP_VIDEOCODEC_VP8:
+ return cdm::VideoDecoderConfig::kCodecVp8;
+ case PP_VIDEOCODEC_H264:
+ return cdm::VideoDecoderConfig::kCodecH264;
+ default:
+ return cdm::VideoDecoderConfig::kUnknownVideoCodec;
+ }
+}
+
+cdm::VideoDecoderConfig::VideoCodecProfile PpVCProfileToCdmVCProfile(
+ PP_VideoCodecProfile profile) {
+ switch (profile) {
+ case PP_VIDEOCODECPROFILE_VP8_MAIN:
+ return cdm::VideoDecoderConfig::kVp8ProfileMain;
+ case PP_VIDEOCODECPROFILE_H264_BASELINE:
+ return cdm::VideoDecoderConfig::kH264ProfileBaseline;
+ case PP_VIDEOCODECPROFILE_H264_MAIN:
+ return cdm::VideoDecoderConfig::kH264ProfileMain;
+ case PP_VIDEOCODECPROFILE_H264_EXTENDED:
+ return cdm::VideoDecoderConfig::kH264ProfileExtended;
+ case PP_VIDEOCODECPROFILE_H264_HIGH:
+ return cdm::VideoDecoderConfig::kH264ProfileHigh;
+ case PP_VIDEOCODECPROFILE_H264_HIGH_10:
+ return cdm::VideoDecoderConfig::kH264ProfileHigh10;
+ case PP_VIDEOCODECPROFILE_H264_HIGH_422:
+ return cdm::VideoDecoderConfig::kH264ProfileHigh422;
+ case PP_VIDEOCODECPROFILE_H264_HIGH_444_PREDICTIVE:
+ return cdm::VideoDecoderConfig::kH264ProfileHigh444Predictive;
+ default:
+ return cdm::VideoDecoderConfig::kUnknownVideoCodecProfile;
+ }
+}
+
+cdm::VideoFormat PpDecryptedFrameFormatToCdmVideoFormat(
+ PP_DecryptedFrameFormat format) {
+ switch (format) {
+ case PP_DECRYPTEDFRAMEFORMAT_YV12:
+ return cdm::kYv12;
+ case PP_DECRYPTEDFRAMEFORMAT_I420:
+ return cdm::kI420;
+ default:
+ return cdm::kUnknownVideoFormat;
+ }
+}
+
+cdm::StreamType PpDecryptorStreamTypeToCdmStreamType(
+ PP_DecryptorStreamType stream_type) {
+ switch (stream_type) {
+ case PP_DECRYPTORSTREAMTYPE_AUDIO:
+ return cdm::kStreamTypeAudio;
+ case PP_DECRYPTORSTREAMTYPE_VIDEO:
+ return cdm::kStreamTypeVideo;
+ }
+
+ PP_NOTREACHED();
+ return cdm::kStreamTypeVideo;
+}
+
+} // namespace
+
+namespace media {
+
+CdmAdapter::CdmAdapter(PP_Instance instance, pp::Module* module)
+ : pp::Instance(instance),
+ pp::ContentDecryptor_Private(this),
+#if defined(OS_CHROMEOS)
+ output_protection_(this),
+ platform_verification_(this),
+ challenge_in_progress_(false),
+ output_link_mask_(0),
+ output_protection_mask_(0),
+ query_output_protection_in_progress_(false),
+#endif
+ allocator_(this),
+ cdm_(NULL),
+ deferred_initialize_audio_decoder_(false),
+ deferred_audio_decoder_config_id_(0),
+ deferred_initialize_video_decoder_(false),
+ deferred_video_decoder_config_id_(0) {
+ callback_factory_.Initialize(this);
+}
+
+CdmAdapter::~CdmAdapter() {}
+
+bool CdmAdapter::CreateCdmInstance(const std::string& key_system) {
+ PP_DCHECK(!cdm_);
+ cdm_ = make_linked_ptr(CdmWrapper::Create(
+ key_system.data(), key_system.size(), GetCdmHost, this));
+ bool success = cdm_ != NULL;
+
+ const std::string message = "CDM instance for " + key_system +
+ (success ? "" : " could not be") + " created.";
+ DLOG_TO_CONSOLE(message);
+ CDM_DLOG() << message;
+
+ return success;
+}
+
+// No KeyErrors should be reported in this function because they cannot be
+// bubbled up in the WD EME API. Those errors will be reported during session
+// creation (CreateSession).
+void CdmAdapter::Initialize(const std::string& key_system) {
+ PP_DCHECK(!key_system.empty());
+ PP_DCHECK(key_system_.empty() || (key_system_ == key_system && cdm_));
+
+ if (!cdm_ && !CreateCdmInstance(key_system))
+ return;
+
+ PP_DCHECK(cdm_);
+ key_system_ = key_system;
+}
+
+void CdmAdapter::CreateSession(uint32_t session_id,
+ const std::string& type,
+ pp::VarArrayBuffer init_data) {
+ // Initialize() doesn't report an error, so CreateSession() can be called
+ // even if Initialize() failed.
+ if (!cdm_) {
+ OnSessionError(session_id, cdm::kUnknownError, 0);
+ return;
+ }
+
+#if defined(CHECK_DOCUMENT_URL)
+ PP_URLComponents_Dev url_components = {};
+ const pp::URLUtil_Dev* url_util = pp::URLUtil_Dev::Get();
+ if (!url_util) {
+ OnSessionError(session_id, cdm::kUnknownError, 0);
+ return;
+ }
+ pp::Var href = url_util->GetDocumentURL(
+ pp::InstanceHandle(pp_instance()), &url_components);
+ PP_DCHECK(href.is_string());
+ PP_DCHECK(!href.AsString().empty());
+ PP_DCHECK(url_components.host.begin);
+ PP_DCHECK(0 < url_components.host.len);
+#endif // defined(CHECK_DOCUMENT_URL)
+
+ cdm_->CreateSession(session_id,
+ type.data(),
+ type.size(),
+ static_cast<const uint8_t*>(init_data.Map()),
+ init_data.ByteLength());
+}
+
+void CdmAdapter::UpdateSession(uint32_t session_id,
+ pp::VarArrayBuffer response) {
+ // TODO(jrummell): In EME WD, AddKey() can only be called on valid sessions.
+ // We should be able to DCHECK(cdm_) when addressing http://crbug.com/249976.
+ if (!cdm_) {
+ OnSessionError(session_id, cdm::kUnknownError, 0);
+ return;
+ }
+
+ const uint8_t* response_ptr = static_cast<const uint8_t*>(response.Map());
+ const uint32_t response_size = response.ByteLength();
+
+ if (!response_ptr || response_size <= 0) {
+ OnSessionError(session_id, cdm::kUnknownError, 0);
+ return;
+ }
+ CdmWrapper::Result result =
+ cdm_->UpdateSession(session_id, response_ptr, response_size);
+ switch (result) {
+ case CdmWrapper::NO_ACTION:
+ break;
+ case CdmWrapper::CALL_KEY_ADDED:
+ OnSessionReady(session_id);
+ break;
+ case CdmWrapper::CALL_KEY_ERROR:
+ OnSessionError(session_id, cdm::kUnknownError, 0);
+ break;
+ }
+}
+
+void CdmAdapter::ReleaseSession(uint32_t session_id) {
+ // TODO(jrummell): In EME WD, AddKey() can only be called on valid sessions.
+ // We should be able to DCHECK(cdm_) when addressing http://crbug.com/249976.
+ if (!cdm_) {
+ OnSessionError(session_id, cdm::kUnknownError, 0);
+ return;
+ }
+
+ CdmWrapper::Result result = cdm_->ReleaseSession(session_id);
+ switch (result) {
+ case CdmWrapper::NO_ACTION:
+ break;
+ case CdmWrapper::CALL_KEY_ADDED:
+ PP_NOTREACHED();
+ break;
+ case CdmWrapper::CALL_KEY_ERROR:
+ OnSessionError(session_id, cdm::kUnknownError, 0);
+ break;
+ }
+}
+
+// Note: In the following decryption/decoding related functions, errors are NOT
+// reported via KeyError, but are reported via corresponding PPB calls.
+
+void CdmAdapter::Decrypt(pp::Buffer_Dev encrypted_buffer,
+ const PP_EncryptedBlockInfo& encrypted_block_info) {
+ PP_DCHECK(!encrypted_buffer.is_null());
+
+ // Release a buffer that the caller indicated it is finished with.
+ allocator_.Release(encrypted_block_info.tracking_info.buffer_id);
+
+ cdm::Status status = cdm::kDecryptError;
+ LinkedDecryptedBlock decrypted_block(new DecryptedBlockImpl());
+
+ if (cdm_) {
+ cdm::InputBuffer input_buffer;
+ std::vector<cdm::SubsampleEntry> subsamples;
+ ConfigureInputBuffer(encrypted_buffer, encrypted_block_info, &subsamples,
+ &input_buffer);
+ status = cdm_->Decrypt(input_buffer, decrypted_block.get());
+ PP_DCHECK(status != cdm::kSuccess ||
+ (decrypted_block->DecryptedBuffer() &&
+ decrypted_block->DecryptedBuffer()->Size()));
+ }
+
+ CallOnMain(callback_factory_.NewCallback(
+ &CdmAdapter::DeliverBlock,
+ status,
+ decrypted_block,
+ encrypted_block_info.tracking_info));
+}
+
+void CdmAdapter::InitializeAudioDecoder(
+ const PP_AudioDecoderConfig& decoder_config,
+ pp::Buffer_Dev extra_data_buffer) {
+ PP_DCHECK(!deferred_initialize_audio_decoder_);
+ PP_DCHECK(deferred_audio_decoder_config_id_ == 0);
+ cdm::Status status = cdm::kSessionError;
+ if (cdm_) {
+ cdm::AudioDecoderConfig cdm_decoder_config;
+ cdm_decoder_config.codec =
+ PpAudioCodecToCdmAudioCodec(decoder_config.codec);
+ cdm_decoder_config.channel_count = decoder_config.channel_count;
+ cdm_decoder_config.bits_per_channel = decoder_config.bits_per_channel;
+ cdm_decoder_config.samples_per_second = decoder_config.samples_per_second;
+ cdm_decoder_config.extra_data =
+ static_cast<uint8_t*>(extra_data_buffer.data());
+ cdm_decoder_config.extra_data_size = extra_data_buffer.size();
+ status = cdm_->InitializeAudioDecoder(cdm_decoder_config);
+ }
+
+ if (status == cdm::kDeferredInitialization) {
+ deferred_initialize_audio_decoder_ = true;
+ deferred_audio_decoder_config_id_ = decoder_config.request_id;
+ return;
+ }
+
+ CallOnMain(callback_factory_.NewCallback(
+ &CdmAdapter::DecoderInitializeDone,
+ PP_DECRYPTORSTREAMTYPE_AUDIO,
+ decoder_config.request_id,
+ status == cdm::kSuccess));
+}
+
+void CdmAdapter::InitializeVideoDecoder(
+ const PP_VideoDecoderConfig& decoder_config,
+ pp::Buffer_Dev extra_data_buffer) {
+ PP_DCHECK(!deferred_initialize_video_decoder_);
+ PP_DCHECK(deferred_video_decoder_config_id_ == 0);
+ cdm::Status status = cdm::kSessionError;
+ if (cdm_) {
+ cdm::VideoDecoderConfig cdm_decoder_config;
+ cdm_decoder_config.codec =
+ PpVideoCodecToCdmVideoCodec(decoder_config.codec);
+ cdm_decoder_config.profile =
+ PpVCProfileToCdmVCProfile(decoder_config.profile);
+ cdm_decoder_config.format =
+ PpDecryptedFrameFormatToCdmVideoFormat(decoder_config.format);
+ cdm_decoder_config.coded_size.width = decoder_config.width;
+ cdm_decoder_config.coded_size.height = decoder_config.height;
+ cdm_decoder_config.extra_data =
+ static_cast<uint8_t*>(extra_data_buffer.data());
+ cdm_decoder_config.extra_data_size = extra_data_buffer.size();
+ status = cdm_->InitializeVideoDecoder(cdm_decoder_config);
+ }
+
+ if (status == cdm::kDeferredInitialization) {
+ deferred_initialize_video_decoder_ = true;
+ deferred_video_decoder_config_id_ = decoder_config.request_id;
+ return;
+ }
+
+ CallOnMain(callback_factory_.NewCallback(
+ &CdmAdapter::DecoderInitializeDone,
+ PP_DECRYPTORSTREAMTYPE_VIDEO,
+ decoder_config.request_id,
+ status == cdm::kSuccess));
+}
+
+void CdmAdapter::DeinitializeDecoder(PP_DecryptorStreamType decoder_type,
+ uint32_t request_id) {
+ PP_DCHECK(cdm_); // InitializeXxxxxDecoder should have succeeded.
+ if (cdm_) {
+ cdm_->DeinitializeDecoder(
+ PpDecryptorStreamTypeToCdmStreamType(decoder_type));
+ }
+
+ CallOnMain(callback_factory_.NewCallback(
+ &CdmAdapter::DecoderDeinitializeDone,
+ decoder_type,
+ request_id));
+}
+
+void CdmAdapter::ResetDecoder(PP_DecryptorStreamType decoder_type,
+ uint32_t request_id) {
+ PP_DCHECK(cdm_); // InitializeXxxxxDecoder should have succeeded.
+ if (cdm_)
+ cdm_->ResetDecoder(PpDecryptorStreamTypeToCdmStreamType(decoder_type));
+
+ CallOnMain(callback_factory_.NewCallback(&CdmAdapter::DecoderResetDone,
+ decoder_type,
+ request_id));
+}
+
+void CdmAdapter::DecryptAndDecode(
+ PP_DecryptorStreamType decoder_type,
+ pp::Buffer_Dev encrypted_buffer,
+ const PP_EncryptedBlockInfo& encrypted_block_info) {
+ PP_DCHECK(cdm_); // InitializeXxxxxDecoder should have succeeded.
+ // Release a buffer that the caller indicated it is finished with.
+ allocator_.Release(encrypted_block_info.tracking_info.buffer_id);
+
+ cdm::InputBuffer input_buffer;
+ std::vector<cdm::SubsampleEntry> subsamples;
+ if (cdm_ && !encrypted_buffer.is_null()) {
+ ConfigureInputBuffer(encrypted_buffer,
+ encrypted_block_info,
+ &subsamples,
+ &input_buffer);
+ }
+
+ cdm::Status status = cdm::kDecodeError;
+
+ switch (decoder_type) {
+ case PP_DECRYPTORSTREAMTYPE_VIDEO: {
+ LinkedVideoFrame video_frame(new VideoFrameImpl());
+ if (cdm_)
+ status = cdm_->DecryptAndDecodeFrame(input_buffer, video_frame.get());
+ CallOnMain(callback_factory_.NewCallback(
+ &CdmAdapter::DeliverFrame,
+ status,
+ video_frame,
+ encrypted_block_info.tracking_info));
+ return;
+ }
+
+ case PP_DECRYPTORSTREAMTYPE_AUDIO: {
+ LinkedAudioFrames audio_frames(new AudioFramesImpl());
+ if (cdm_) {
+ status = cdm_->DecryptAndDecodeSamples(input_buffer,
+ audio_frames.get());
+ }
+ CallOnMain(callback_factory_.NewCallback(
+ &CdmAdapter::DeliverSamples,
+ status,
+ audio_frames,
+ encrypted_block_info.tracking_info));
+ return;
+ }
+
+ default:
+ PP_NOTREACHED();
+ return;
+ }
+}
+
+cdm::Buffer* CdmAdapter::Allocate(uint32_t capacity) {
+ return allocator_.Allocate(capacity);
+}
+
+void CdmAdapter::SetTimer(int64_t delay_ms, void* context) {
+ // NOTE: doesn't really need to run on the main thread; could just as well run
+ // on a helper thread if |cdm_| were thread-friendly and care was taken. We
+ // only use CallOnMainThread() here to get delayed-execution behavior.
+ pp::Module::Get()->core()->CallOnMainThread(
+ delay_ms,
+ callback_factory_.NewCallback(&CdmAdapter::TimerExpired, context),
+ PP_OK);
+}
+
+void CdmAdapter::TimerExpired(int32_t result, void* context) {
+ PP_DCHECK(result == PP_OK);
+ cdm_->TimerExpired(context);
+}
+
+double CdmAdapter::GetCurrentWallTimeInSeconds() {
+ return pp::Module::Get()->core()->GetTime();
+}
+
+void CdmAdapter::SendKeyMessage(
+ const char* session_id, uint32_t session_id_length,
+ const char* message, uint32_t message_length,
+ const char* default_url, uint32_t default_url_length) {
+ PP_DCHECK(!key_system_.empty());
+
+ std::string session_id_str(session_id, session_id_length);
+ PP_DCHECK(!session_id_str.empty());
+ uint32_t session_reference_id = cdm_->LookupSessionId(session_id_str);
+
+ OnSessionCreated(session_reference_id, session_id, session_id_length);
+ OnSessionMessage(session_reference_id,
+ message, message_length,
+ default_url, default_url_length);
+}
+
+void CdmAdapter::SendKeyError(const char* session_id,
+ uint32_t session_id_length,
+ cdm::MediaKeyError error_code,
+ uint32_t system_code) {
+ std::string session_id_str(session_id, session_id_length);
+ uint32_t session_reference_id = cdm_->LookupSessionId(session_id_str);
+ OnSessionError(session_reference_id, error_code, system_code);
+}
+
+void CdmAdapter::GetPrivateData(int32_t* instance,
+ GetPrivateInterface* get_interface) {
+ *instance = pp_instance();
+ *get_interface = pp::Module::Get()->get_browser_interface();
+}
+
+void CdmAdapter::OnSessionCreated(uint32_t session_id,
+ const char* web_session_id,
+ uint32_t web_session_id_length) {
+ PostOnMain(callback_factory_.NewCallback(
+ &CdmAdapter::SendSessionCreatedInternal,
+ session_id,
+ std::string(web_session_id, web_session_id_length)));
+}
+
+void CdmAdapter::OnSessionMessage(uint32_t session_id,
+ const char* message,
+ uint32_t message_length,
+ const char* destination_url,
+ uint32_t destination_url_length) {
+ PostOnMain(callback_factory_.NewCallback(
+ &CdmAdapter::SendSessionMessageInternal,
+ session_id,
+ std::vector<uint8>(message, message + message_length),
+ std::string(destination_url, destination_url_length)));
+}
+
+void CdmAdapter::OnSessionReady(uint32_t session_id) {
+ PostOnMain(callback_factory_.NewCallback(
+ &CdmAdapter::SendSessionReadyInternal, session_id));
+}
+
+void CdmAdapter::OnSessionClosed(uint32_t session_id) {
+ PostOnMain(callback_factory_.NewCallback(
+ &CdmAdapter::SendSessionClosedInternal, session_id));
+}
+
+void CdmAdapter::OnSessionError(uint32_t session_id,
+ cdm::MediaKeyError error_code,
+ uint32_t system_code) {
+ PostOnMain(callback_factory_.NewCallback(
+ &CdmAdapter::SendSessionErrorInternal,
+ session_id,
+ error_code,
+ system_code));
+}
+
+void CdmAdapter::SendSessionCreatedInternal(int32_t result,
+ uint32_t session_id,
+ const std::string& web_session_id) {
+ PP_DCHECK(result == PP_OK);
+ pp::ContentDecryptor_Private::SessionCreated(session_id, web_session_id);
+}
+
+void CdmAdapter::SendSessionMessageInternal(int32_t result,
+ uint32_t session_id,
+ const std::vector<uint8>& message,
+ const std::string& default_url) {
+ PP_DCHECK(result == PP_OK);
+
+ pp::VarArrayBuffer message_array_buffer(message.size());
+ if (message.size() > 0) {
+ memcpy(message_array_buffer.Map(), message.data(), message.size());
+ }
+
+ pp::ContentDecryptor_Private::SessionMessage(
+ session_id, message_array_buffer, default_url);
+}
+
+void CdmAdapter::SendSessionReadyInternal(int32_t result, uint32_t session_id) {
+ PP_DCHECK(result == PP_OK);
+ pp::ContentDecryptor_Private::SessionReady(session_id);
+}
+
+void CdmAdapter::SendSessionClosedInternal(int32_t result,
+ uint32_t session_id) {
+ PP_DCHECK(result == PP_OK);
+ pp::ContentDecryptor_Private::SessionClosed(session_id);
+}
+
+void CdmAdapter::SendSessionErrorInternal(int32_t result,
+ uint32_t session_id,
+ cdm::MediaKeyError error_code,
+ uint32_t system_code) {
+ PP_DCHECK(result == PP_OK);
+ pp::ContentDecryptor_Private::SessionError(
+ session_id, error_code, system_code);
+}
+
+void CdmAdapter::DeliverBlock(int32_t result,
+ const cdm::Status& status,
+ const LinkedDecryptedBlock& decrypted_block,
+ const PP_DecryptTrackingInfo& tracking_info) {
+ PP_DCHECK(result == PP_OK);
+ PP_DecryptedBlockInfo decrypted_block_info;
+ decrypted_block_info.tracking_info = tracking_info;
+ decrypted_block_info.tracking_info.timestamp = decrypted_block->Timestamp();
+ decrypted_block_info.tracking_info.buffer_id = 0;
+ decrypted_block_info.data_size = 0;
+ decrypted_block_info.result = CdmStatusToPpDecryptResult(status);
+
+ pp::Buffer_Dev buffer;
+
+ if (decrypted_block_info.result == PP_DECRYPTRESULT_SUCCESS) {
+ PP_DCHECK(decrypted_block.get() && decrypted_block->DecryptedBuffer());
+ if (!decrypted_block.get() || !decrypted_block->DecryptedBuffer()) {
+ PP_NOTREACHED();
+ decrypted_block_info.result = PP_DECRYPTRESULT_DECRYPT_ERROR;
+ } else {
+ PpbBuffer* ppb_buffer =
+ static_cast<PpbBuffer*>(decrypted_block->DecryptedBuffer());
+ buffer = ppb_buffer->buffer_dev();
+ decrypted_block_info.tracking_info.buffer_id = ppb_buffer->buffer_id();
+ decrypted_block_info.data_size = ppb_buffer->Size();
+ }
+ }
+
+ pp::ContentDecryptor_Private::DeliverBlock(buffer, decrypted_block_info);
+}
+
+void CdmAdapter::DecoderInitializeDone(int32_t result,
+ PP_DecryptorStreamType decoder_type,
+ uint32_t request_id,
+ bool success) {
+ PP_DCHECK(result == PP_OK);
+ pp::ContentDecryptor_Private::DecoderInitializeDone(decoder_type,
+ request_id,
+ success);
+}
+
+void CdmAdapter::DecoderDeinitializeDone(int32_t result,
+ PP_DecryptorStreamType decoder_type,
+ uint32_t request_id) {
+ pp::ContentDecryptor_Private::DecoderDeinitializeDone(decoder_type,
+ request_id);
+}
+
+void CdmAdapter::DecoderResetDone(int32_t result,
+ PP_DecryptorStreamType decoder_type,
+ uint32_t request_id) {
+ pp::ContentDecryptor_Private::DecoderResetDone(decoder_type, request_id);
+}
+
+void CdmAdapter::DeliverFrame(
+ int32_t result,
+ const cdm::Status& status,
+ const LinkedVideoFrame& video_frame,
+ const PP_DecryptTrackingInfo& tracking_info) {
+ PP_DCHECK(result == PP_OK);
+ PP_DecryptedFrameInfo decrypted_frame_info;
+ decrypted_frame_info.tracking_info.request_id = tracking_info.request_id;
+ decrypted_frame_info.tracking_info.buffer_id = 0;
+ decrypted_frame_info.result = CdmStatusToPpDecryptResult(status);
+
+ pp::Buffer_Dev buffer;
+
+ if (decrypted_frame_info.result == PP_DECRYPTRESULT_SUCCESS) {
+ if (!IsValidVideoFrame(video_frame)) {
+ PP_NOTREACHED();
+ decrypted_frame_info.result = PP_DECRYPTRESULT_DECODE_ERROR;
+ } else {
+ PpbBuffer* ppb_buffer =
+ static_cast<PpbBuffer*>(video_frame->FrameBuffer());
+
+ buffer = ppb_buffer->buffer_dev();
+
+ decrypted_frame_info.tracking_info.timestamp = video_frame->Timestamp();
+ decrypted_frame_info.tracking_info.buffer_id = ppb_buffer->buffer_id();
+ decrypted_frame_info.format =
+ CdmVideoFormatToPpDecryptedFrameFormat(video_frame->Format());
+ decrypted_frame_info.width = video_frame->Size().width;
+ decrypted_frame_info.height = video_frame->Size().height;
+ decrypted_frame_info.plane_offsets[PP_DECRYPTEDFRAMEPLANES_Y] =
+ video_frame->PlaneOffset(cdm::VideoFrame::kYPlane);
+ decrypted_frame_info.plane_offsets[PP_DECRYPTEDFRAMEPLANES_U] =
+ video_frame->PlaneOffset(cdm::VideoFrame::kUPlane);
+ decrypted_frame_info.plane_offsets[PP_DECRYPTEDFRAMEPLANES_V] =
+ video_frame->PlaneOffset(cdm::VideoFrame::kVPlane);
+ decrypted_frame_info.strides[PP_DECRYPTEDFRAMEPLANES_Y] =
+ video_frame->Stride(cdm::VideoFrame::kYPlane);
+ decrypted_frame_info.strides[PP_DECRYPTEDFRAMEPLANES_U] =
+ video_frame->Stride(cdm::VideoFrame::kUPlane);
+ decrypted_frame_info.strides[PP_DECRYPTEDFRAMEPLANES_V] =
+ video_frame->Stride(cdm::VideoFrame::kVPlane);
+ }
+ }
+ pp::ContentDecryptor_Private::DeliverFrame(buffer, decrypted_frame_info);
+}
+
+void CdmAdapter::DeliverSamples(int32_t result,
+ const cdm::Status& status,
+ const LinkedAudioFrames& audio_frames,
+ const PP_DecryptTrackingInfo& tracking_info) {
+ PP_DCHECK(result == PP_OK);
+
+ PP_DecryptedSampleInfo decrypted_sample_info;
+ decrypted_sample_info.tracking_info = tracking_info;
+ decrypted_sample_info.tracking_info.timestamp = 0;
+ decrypted_sample_info.tracking_info.buffer_id = 0;
+ decrypted_sample_info.data_size = 0;
+ decrypted_sample_info.result = CdmStatusToPpDecryptResult(status);
+
+ pp::Buffer_Dev buffer;
+
+ if (decrypted_sample_info.result == PP_DECRYPTRESULT_SUCCESS) {
+ PP_DCHECK(audio_frames.get() && audio_frames->FrameBuffer());
+ if (!audio_frames.get() || !audio_frames->FrameBuffer()) {
+ PP_NOTREACHED();
+ decrypted_sample_info.result = PP_DECRYPTRESULT_DECRYPT_ERROR;
+ } else {
+ PpbBuffer* ppb_buffer =
+ static_cast<PpbBuffer*>(audio_frames->FrameBuffer());
+ buffer = ppb_buffer->buffer_dev();
+ decrypted_sample_info.tracking_info.buffer_id = ppb_buffer->buffer_id();
+ decrypted_sample_info.data_size = ppb_buffer->Size();
+ decrypted_sample_info.format =
+ CdmAudioFormatToPpDecryptedSampleFormat(audio_frames->Format());
+ }
+ }
+
+ pp::ContentDecryptor_Private::DeliverSamples(buffer, decrypted_sample_info);
+}
+
+bool CdmAdapter::IsValidVideoFrame(const LinkedVideoFrame& video_frame) {
+ if (!video_frame.get() ||
+ !video_frame->FrameBuffer() ||
+ (video_frame->Format() != cdm::kI420 &&
+ video_frame->Format() != cdm::kYv12)) {
+ CDM_DLOG() << "Invalid video frame!";
+ return false;
+ }
+
+ PpbBuffer* ppb_buffer = static_cast<PpbBuffer*>(video_frame->FrameBuffer());
+
+ for (uint32_t i = 0; i < cdm::VideoFrame::kMaxPlanes; ++i) {
+ int plane_height = (i == cdm::VideoFrame::kYPlane) ?
+ video_frame->Size().height : (video_frame->Size().height + 1) / 2;
+ cdm::VideoFrame::VideoPlane plane =
+ static_cast<cdm::VideoFrame::VideoPlane>(i);
+ if (ppb_buffer->Size() < video_frame->PlaneOffset(plane) +
+ plane_height * video_frame->Stride(plane)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+#if !defined(NDEBUG)
+void CdmAdapter::LogToConsole(const pp::Var& value) {
+ PP_DCHECK(IsMainThread());
+ const PPB_Console* console = reinterpret_cast<const PPB_Console*>(
+ pp::Module::Get()->GetBrowserInterface(PPB_CONSOLE_INTERFACE));
+ console->Log(pp_instance(), PP_LOGLEVEL_LOG, value.pp_var());
+}
+#endif // !defined(NDEBUG)
+
// cdm::Host_2 implementation: asks the platform (Chrome OS only) to sign
// |challenge| on behalf of |service_id|. The asynchronous reply arrives in
// SendPlatformChallengeDone(); on other platforms, or on synchronous
// failure, an empty response is sent to the CDM immediately.
void CdmAdapter::SendPlatformChallenge(
    const char* service_id, uint32_t service_id_length,
    const char* challenge, uint32_t challenge_length) {
#if defined(OS_CHROMEOS)
  // Only one challenge may be outstanding at a time.
  PP_DCHECK(!challenge_in_progress_);

  // Ensure member variables set by the callback are in a clean state.
  signed_data_output_ = pp::Var();
  signed_data_signature_output_ = pp::Var();
  platform_key_certificate_output_ = pp::Var();

  // Copy the raw challenge bytes into a VarArrayBuffer for the pepper call.
  pp::VarArrayBuffer challenge_var(challenge_length);
  uint8_t* var_data = static_cast<uint8_t*>(challenge_var.Map());
  memcpy(var_data, challenge, challenge_length);

  std::string service_id_str(service_id, service_id_length);
  int32_t result = platform_verification_.ChallengePlatform(
      pp::Var(service_id_str), challenge_var, &signed_data_output_,
      &signed_data_signature_output_, &platform_key_certificate_output_,
      callback_factory_.NewCallback(&CdmAdapter::SendPlatformChallengeDone));
  challenge_var.Unmap();
  if (result == PP_OK_COMPLETIONPENDING) {
    challenge_in_progress_ = true;
    return;
  }

  // Fall through on error and issue an empty OnPlatformChallengeResponse().
  PP_DCHECK(result != PP_OK);
#endif

  // Reached on non-Chrome-OS platforms, or when ChallengePlatform() failed
  // synchronously: reply with a zero-initialized (empty) response.
  cdm::PlatformChallengeResponse response = {};
  cdm_->OnPlatformChallengeResponse(response);
}
+
// cdm::Host_2 implementation: requests the given output-protection mask
// (Chrome OS only). Fire-and-forget: completion is handled by
// EnableProtectionDone(), and failures are only logged.
void CdmAdapter::EnableOutputProtection(uint32_t desired_protection_mask) {
#if defined(OS_CHROMEOS)
  int32_t result = output_protection_.EnableProtection(
      desired_protection_mask, callback_factory_.NewCallback(
          &CdmAdapter::EnableProtectionDone));

  // Errors are ignored since clients must call QueryOutputProtectionStatus() to
  // inspect the protection status on a regular basis.

  if (result != PP_OK && result != PP_OK_COMPLETIONPENDING)
    CDM_DLOG() << __FUNCTION__ << " failed!";
#endif
}
+
// cdm::Host_2 implementation: asynchronously queries the current output link
// and protection masks (Chrome OS only). The reply is delivered to the CDM
// from QueryOutputProtectionStatusDone(); on other platforms, or on
// synchronous failure, the CDM immediately receives all-zero masks.
void CdmAdapter::QueryOutputProtectionStatus() {
#if defined(OS_CHROMEOS)
  // Only one query may be outstanding at a time.
  PP_DCHECK(!query_output_protection_in_progress_);

  // Reset the output parameters the pepper call will fill in.
  output_link_mask_ = output_protection_mask_ = 0;
  const int32_t result = output_protection_.QueryStatus(
      &output_link_mask_,
      &output_protection_mask_,
      callback_factory_.NewCallback(
          &CdmAdapter::QueryOutputProtectionStatusDone));
  if (result == PP_OK_COMPLETIONPENDING) {
    query_output_protection_in_progress_ = true;
    return;
  }

  // Fall through on error and issue an empty OnQueryOutputProtectionStatus().
  PP_DCHECK(result != PP_OK);
#endif

  cdm_->OnQueryOutputProtectionStatus(0, 0);
}
+
// cdm::Host_2 implementation: called by the CDM when a decoder that earlier
// returned kDeferredInitialization finishes initializing. Reports the result
// to the browser on the main thread using the request id saved in
// Initialize(Audio|Video)Decoder(), then clears the deferred state.
void CdmAdapter::OnDeferredInitializationDone(cdm::StreamType stream_type,
                                              cdm::Status decoder_status) {
  switch (stream_type) {
    case cdm::kStreamTypeAudio:
      PP_DCHECK(deferred_initialize_audio_decoder_);
      // CallOnMain: PPB_ContentDecryptor_Private calls must be asynchronous.
      CallOnMain(
          callback_factory_.NewCallback(&CdmAdapter::DecoderInitializeDone,
                                        PP_DECRYPTORSTREAMTYPE_AUDIO,
                                        deferred_audio_decoder_config_id_,
                                        decoder_status == cdm::kSuccess));
      deferred_initialize_audio_decoder_ = false;
      deferred_audio_decoder_config_id_ = 0;
      break;
    case cdm::kStreamTypeVideo:
      PP_DCHECK(deferred_initialize_video_decoder_);
      CallOnMain(
          callback_factory_.NewCallback(&CdmAdapter::DecoderInitializeDone,
                                        PP_DECRYPTORSTREAMTYPE_VIDEO,
                                        deferred_video_decoder_config_id_,
                                        decoder_status == cdm::kSuccess));
      deferred_initialize_video_decoder_ = false;
      deferred_video_decoder_config_id_ = 0;
      break;
  }
}
+
+#if defined(OS_CHROMEOS)
// Completion callback for ChallengePlatform(): packages the signed data,
// signature and platform key certificate collected in the member output
// variables into a cdm::PlatformChallengeResponse for the CDM. On failure an
// empty response is sent instead.
void CdmAdapter::SendPlatformChallengeDone(int32_t result) {
  challenge_in_progress_ = false;

  if (result != PP_OK) {
    CDM_DLOG() << __FUNCTION__ << ": Platform challenge failed!";
    cdm::PlatformChallengeResponse response = {};
    cdm_->OnPlatformChallengeResponse(response);
    return;
  }

  pp::VarArrayBuffer signed_data_var(signed_data_output_);
  pp::VarArrayBuffer signed_data_signature_var(signed_data_signature_output_);
  std::string platform_key_certificate_string =
      platform_key_certificate_output_.AsString();

  // The response only borrows the mapped buffers and the local string; the
  // CDM must not hold on to these pointers past this call.
  cdm::PlatformChallengeResponse response = {
    static_cast<uint8_t*>(signed_data_var.Map()),
    signed_data_var.ByteLength(),

    static_cast<uint8_t*>(signed_data_signature_var.Map()),
    signed_data_signature_var.ByteLength(),

    reinterpret_cast<const uint8_t*>(platform_key_certificate_string.c_str()),
    static_cast<uint32_t>(platform_key_certificate_string.length())
  };
  cdm_->OnPlatformChallengeResponse(response);

  // Unmap only after the CDM has consumed the response.
  signed_data_var.Unmap();
  signed_data_signature_var.Unmap();
}
+
// Completion callback for EnableProtection(); logs the result only.
void CdmAdapter::EnableProtectionDone(int32_t result) {
  // Does nothing since clients must call QueryOutputProtectionStatus() to
  // inspect the protection status on a regular basis.
  CDM_DLOG() << __FUNCTION__ << " : " << result;
}
+
// Completion callback for QueryStatus(): forwards the link/protection masks
// filled in by pepper to the CDM, substituting zeros on failure.
void CdmAdapter::QueryOutputProtectionStatusDone(int32_t result) {
  PP_DCHECK(query_output_protection_in_progress_);
  query_output_protection_in_progress_ = false;

  // Return a protection status of none on error.
  if (result != PP_OK)
    output_link_mask_ = output_protection_mask_ = 0;

  cdm_->OnQueryOutputProtectionStatus(output_link_mask_,
                                      output_protection_mask_);
}
+#endif
+
// GetCdmHostFunc implementation handed to the CDM library: returns the
// cdm::Host_N interface of the CdmAdapter (passed in |user_data|) matching
// |host_interface_version|, or NULL for unsupported versions.
void* GetCdmHost(int host_interface_version, void* user_data) {
  if (!host_interface_version || !user_data)
    return NULL;

  // The default Host alias must track the newest concrete Host version;
  // fail the build if it drifts so the checks below get updated together.
  COMPILE_ASSERT(cdm::ContentDecryptionModule::Host::kVersion ==
                 cdm::ContentDecryptionModule_3::Host::kVersion,
                 update_code_below);

  // Ensure IsSupportedCdmHostVersion matches implementation of this function.
  // Always update this DCHECK when updating this function.
  // If this check fails, update this function and DCHECK or update
  // IsSupportedCdmHostVersion.
  PP_DCHECK(
      // Future version is not supported.
      !IsSupportedCdmHostVersion(
          cdm::ContentDecryptionModule::Host::kVersion + 1) &&
      // Current version is supported.
      IsSupportedCdmHostVersion(cdm::ContentDecryptionModule::Host::kVersion) &&
      // Include all previous supported versions here.
      IsSupportedCdmHostVersion(cdm::Host_1::kVersion) &&
      // One older than the oldest supported version is not supported.
      !IsSupportedCdmHostVersion(cdm::Host_1::kVersion - 1));
  PP_DCHECK(IsSupportedCdmHostVersion(host_interface_version));

  CdmAdapter* cdm_adapter = static_cast<CdmAdapter*>(user_data);
  CDM_DLOG() << "Create CDM Host with version " << host_interface_version;
  // The static_cast selects the correct base-class subobject of CdmAdapter,
  // which implements all supported Host versions.
  switch (host_interface_version) {
    case cdm::Host_3::kVersion:
      return static_cast<cdm::Host_3*>(cdm_adapter);
    case cdm::Host_2::kVersion:
      return static_cast<cdm::Host_2*>(cdm_adapter);
    case cdm::Host_1::kVersion:
      return static_cast<cdm::Host_1*>(cdm_adapter);
    default:
      PP_NOTREACHED();
      return NULL;
  }
}
+
// This object is the global object representing this plugin library as long
// as it is loaded.
class CdmAdapterModule : public pp::Module {
 public:
  CdmAdapterModule() : pp::Module() {
    // This function blocks the renderer thread (PluginInstance::Initialize()).
    // Move this call to other places if this may be a concern in the future.
    INITIALIZE_CDM_MODULE();
  }
  virtual ~CdmAdapterModule() {
    // Matches the INITIALIZE_CDM_MODULE() call in the constructor.
    DeinitializeCdmModule();
  }

  // Called by pepper for every plugin instance on a page; each instance gets
  // its own CdmAdapter, owned by the pepper runtime.
  virtual pp::Instance* CreateInstance(PP_Instance instance) {
    return new CdmAdapter(instance, this);
  }
};
+
+} // namespace media
+
namespace pp {

// Factory function for your specialization of the Module object.
// The pepper runtime calls this exactly once when the library is loaded and
// takes ownership of the returned module.
Module* CreateModule() {
  return new media::CdmAdapterModule();
}

}  // namespace pp
diff --git a/chromium/media/cdm/ppapi/cdm_adapter.h b/chromium/media/cdm/ppapi/cdm_adapter.h
new file mode 100644
index 00000000000..d256913aede
--- /dev/null
+++ b/chromium/media/cdm/ppapi/cdm_adapter.h
@@ -0,0 +1,220 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CDM_PPAPI_CDM_ADAPTER_H_
+#define MEDIA_CDM_PPAPI_CDM_ADAPTER_H_
+
+#include <string>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/compiler_specific.h"
+#include "build/build_config.h"
+#include "media/cdm/ppapi/api/content_decryption_module.h"
+#include "media/cdm/ppapi/cdm_helpers.h"
+#include "media/cdm/ppapi/cdm_wrapper.h"
+#include "media/cdm/ppapi/linked_ptr.h"
+#include "ppapi/c/pp_stdint.h"
+#include "ppapi/c/private/pp_content_decryptor.h"
+#include "ppapi/cpp/completion_callback.h"
+#include "ppapi/cpp/private/content_decryptor_private.h"
+#include "ppapi/cpp/var.h"
+#include "ppapi/cpp/var_array_buffer.h"
+#include "ppapi/utility/completion_callback_factory.h"
+
+#if defined(OS_CHROMEOS)
+#include "ppapi/cpp/private/output_protection_private.h"
+#include "ppapi/cpp/private/platform_verification.h"
+#endif
+
+namespace media {
+
+// GetCdmHostFunc implementation.
+void* GetCdmHost(int host_interface_version, void* user_data);
+
// An adapter class for abstracting away PPAPI interaction and threading for a
// Content Decryption Module (CDM). It bridges two interfaces: it receives
// browser requests through PPP_ContentDecryptor_Private and forwards them to
// the CDM, and it implements the cdm::Host_N interfaces through which the CDM
// calls back (allocation, timers, key messages, platform services).
class CdmAdapter : public pp::Instance,
                   public pp::ContentDecryptor_Private,
                   public cdm::Host_1,
                   public cdm::Host_2,
                   public cdm::Host_3 {
 public:
  CdmAdapter(PP_Instance instance, pp::Module* module);
  virtual ~CdmAdapter();

  // pp::Instance implementation.
  virtual bool Init(uint32_t argc, const char* argn[], const char* argv[]) {
    return true;
  }

  // PPP_ContentDecryptor_Private implementation.
  // Note: Results of calls to these methods must be reported through the
  // PPB_ContentDecryptor_Private interface.
  virtual void Initialize(const std::string& key_system) OVERRIDE;
  virtual void CreateSession(uint32_t session_id,
                             const std::string& type,
                             pp::VarArrayBuffer init_data) OVERRIDE;
  virtual void UpdateSession(uint32_t session_id,
                             pp::VarArrayBuffer response) OVERRIDE;
  virtual void ReleaseSession(uint32_t session_id) OVERRIDE;
  virtual void Decrypt(
      pp::Buffer_Dev encrypted_buffer,
      const PP_EncryptedBlockInfo& encrypted_block_info) OVERRIDE;
  virtual void InitializeAudioDecoder(
      const PP_AudioDecoderConfig& decoder_config,
      pp::Buffer_Dev extra_data_buffer) OVERRIDE;
  virtual void InitializeVideoDecoder(
      const PP_VideoDecoderConfig& decoder_config,
      pp::Buffer_Dev extra_data_buffer) OVERRIDE;
  virtual void DeinitializeDecoder(PP_DecryptorStreamType decoder_type,
                                   uint32_t request_id) OVERRIDE;
  virtual void ResetDecoder(PP_DecryptorStreamType decoder_type,
                            uint32_t request_id) OVERRIDE;
  virtual void DecryptAndDecode(
      PP_DecryptorStreamType decoder_type,
      pp::Buffer_Dev encrypted_buffer,
      const PP_EncryptedBlockInfo& encrypted_block_info) OVERRIDE;

  // cdm::Host implementation.
  virtual cdm::Buffer* Allocate(uint32_t capacity) OVERRIDE;
  virtual void SetTimer(int64_t delay_ms, void* context) OVERRIDE;
  virtual double GetCurrentWallTimeInSeconds() OVERRIDE;
  virtual void SendKeyMessage(
      const char* session_id, uint32_t session_id_length,
      const char* message, uint32_t message_length,
      const char* default_url, uint32_t default_url_length) OVERRIDE;
  virtual void SendKeyError(const char* session_id,
                            uint32_t session_id_length,
                            cdm::MediaKeyError error_code,
                            uint32_t system_code) OVERRIDE;
  virtual void GetPrivateData(int32_t* instance,
                              GetPrivateInterface* get_interface) OVERRIDE;

  // cdm::Host_2 implementation.
  virtual void SendPlatformChallenge(
      const char* service_id, uint32_t service_id_length,
      const char* challenge, uint32_t challenge_length) OVERRIDE;
  virtual void EnableOutputProtection(
      uint32_t desired_protection_mask) OVERRIDE;
  virtual void QueryOutputProtectionStatus() OVERRIDE;
  virtual void OnDeferredInitializationDone(
      cdm::StreamType stream_type,
      cdm::Status decoder_status) OVERRIDE;

  // cdm::Host_3 implementation.
  virtual void OnSessionCreated(uint32_t session_id,
                                const char* web_session_id,
                                uint32_t web_session_id_length) OVERRIDE;
  virtual void OnSessionMessage(uint32_t session_id,
                                const char* message,
                                uint32_t message_length,
                                const char* destination_url,
                                uint32_t destination_url_length) OVERRIDE;
  virtual void OnSessionReady(uint32_t session_id) OVERRIDE;
  virtual void OnSessionClosed(uint32_t session_id) OVERRIDE;
  virtual void OnSessionError(uint32_t session_id,
                              cdm::MediaKeyError error_code,
                              uint32_t system_code) OVERRIDE;

 private:
  // linked_ptr aliases used so CDM output objects can be bound into
  // pp::CompletionCallbacks (which copy their arguments).
  typedef linked_ptr<DecryptedBlockImpl> LinkedDecryptedBlock;
  typedef linked_ptr<VideoFrameImpl> LinkedVideoFrame;
  typedef linked_ptr<AudioFramesImpl> LinkedAudioFrames;

  bool CreateCdmInstance(const std::string& key_system);

  // <code>PPB_ContentDecryptor_Private</code> dispatchers. These are passed to
  // <code>callback_factory_</code> to ensure that calls into
  // <code>PPP_ContentDecryptor_Private</code> are asynchronous.
  void SendSessionCreatedInternal(int32_t result,
                                  uint32_t session_id,
                                  const std::string& web_session_id);
  void SendSessionMessageInternal(int32_t result,
                                  uint32_t session_id,
                                  const std::vector<uint8>& message,
                                  const std::string& default_url);
  void SendSessionReadyInternal(int32_t result, uint32_t session_id);
  void SendSessionClosedInternal(int32_t result, uint32_t session_id);
  void SendSessionErrorInternal(int32_t result,
                                uint32_t session_id,
                                cdm::MediaKeyError error_code,
                                uint32_t system_code);

  void DeliverBlock(int32_t result,
                    const cdm::Status& status,
                    const LinkedDecryptedBlock& decrypted_block,
                    const PP_DecryptTrackingInfo& tracking_info);
  void DecoderInitializeDone(int32_t result,
                             PP_DecryptorStreamType decoder_type,
                             uint32_t request_id,
                             bool success);
  void DecoderDeinitializeDone(int32_t result,
                               PP_DecryptorStreamType decoder_type,
                               uint32_t request_id);
  void DecoderResetDone(int32_t result,
                        PP_DecryptorStreamType decoder_type,
                        uint32_t request_id);
  void DeliverFrame(int32_t result,
                    const cdm::Status& status,
                    const LinkedVideoFrame& video_frame,
                    const PP_DecryptTrackingInfo& tracking_info);
  void DeliverSamples(int32_t result,
                      const cdm::Status& status,
                      const LinkedAudioFrames& audio_frames,
                      const PP_DecryptTrackingInfo& tracking_info);

  // Helper for SetTimer().
  void TimerExpired(int32_t result, void* context);

  bool IsValidVideoFrame(const LinkedVideoFrame& video_frame);

#if !defined(NDEBUG)
  // Logs the given message to the JavaScript console associated with the
  // CDM adapter instance. The name of the CDM adapter issuing the log message
  // will be automatically prepended to the message.
  void LogToConsole(const pp::Var& value);
#endif  // !defined(NDEBUG)

#if defined(OS_CHROMEOS)
  // Completion callbacks for the asynchronous platform-verification and
  // output-protection pepper calls issued by the cdm::Host_2 methods above.
  void SendPlatformChallengeDone(int32_t result);
  void EnableProtectionDone(int32_t result);
  void QueryOutputProtectionStatusDone(int32_t result);

  pp::OutputProtection_Private output_protection_;
  pp::PlatformVerification platform_verification_;

  // Since PPAPI doesn't provide handlers for CompletionCallbacks with more than
  // one output we need to manage our own. These values are only read by
  // SendPlatformChallengeDone().
  pp::Var signed_data_output_;
  pp::Var signed_data_signature_output_;
  pp::Var platform_key_certificate_output_;
  bool challenge_in_progress_;

  // Same as above, these are only read by QueryOutputProtectionStatusDone().
  uint32_t output_link_mask_;
  uint32_t output_protection_mask_;
  bool query_output_protection_in_progress_;
#endif

  // Hands out PpbBuffer-backed cdm::Buffers via the Allocate() host call.
  PpbBufferAllocator allocator_;
  pp::CompletionCallbackFactory<CdmAdapter> callback_factory_;
  linked_ptr<CdmWrapper> cdm_;
  std::string key_system_;

  // If the CDM returned kDeferredInitialization during InitializeAudioDecoder()
  // or InitializeVideoDecoder(), the (Audio|Video)DecoderConfig.request_id is
  // saved for the future call to OnDeferredInitializationDone().
  bool deferred_initialize_audio_decoder_;
  uint32_t deferred_audio_decoder_config_id_;
  bool deferred_initialize_video_decoder_;
  uint32_t deferred_video_decoder_config_id_;

  DISALLOW_COPY_AND_ASSIGN(CdmAdapter);
};
+
+} // namespace media
+
+#endif // MEDIA_CDM_PPAPI_CDM_ADAPTER_H_
diff --git a/chromium/media/cdm/ppapi/cdm_helpers.cc b/chromium/media/cdm/ppapi/cdm_helpers.cc
new file mode 100644
index 00000000000..36b95021f8c
--- /dev/null
+++ b/chromium/media/cdm/ppapi/cdm_helpers.cc
@@ -0,0 +1,102 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cdm/ppapi/cdm_helpers.h"
+
+#include <utility>
+
+#include "base/basictypes.h"
+#include "base/compiler_specific.h"
+#include "build/build_config.h"
+#include "media/cdm/ppapi/api/content_decryption_module.h"
+#include "ppapi/c/pp_errors.h"
+#include "ppapi/c/pp_stdint.h"
+#include "ppapi/cpp/core.h"
+#include "ppapi/cpp/dev/buffer_dev.h"
+#include "ppapi/cpp/instance.h"
+#include "ppapi/cpp/logging.h"
+#include "ppapi/cpp/module.h"
+
+namespace media {
+
// Returns a PpbBuffer of at least |capacity| bytes, recycling a suitable
// buffer from the free list when possible to avoid the expensive
// pp::Buffer_Dev creation. Returns NULL for a zero capacity or when
// allocation fails. Main thread only.
cdm::Buffer* PpbBufferAllocator::Allocate(uint32_t capacity) {
  PP_DCHECK(pp::Module::Get()->core()->IsMainThread());

  if (!capacity)
    return NULL;

  pp::Buffer_Dev buffer;
  uint32_t buffer_id = 0;

  // Reuse a buffer in the free list if there is one that fits |capacity|.
  // Otherwise, create a new one.
  // lower_bound works because free_buffers_ is keyed by buffer capacity.
  FreeBufferMap::iterator found = free_buffers_.lower_bound(capacity);
  if (found == free_buffers_.end()) {
    // TODO(xhwang): Report statistics about how many new buffers are allocated.
    buffer = AllocateNewBuffer(capacity);
    if (buffer.is_null())
      return NULL;
    buffer_id = next_buffer_id_++;
  } else {
    buffer = found->second.second;
    buffer_id = found->second.first;
    free_buffers_.erase(found);
  }

  // Track the buffer as live until Release(buffer_id) is called.
  allocated_buffers_.insert(std::make_pair(buffer_id, buffer));

  return PpbBuffer::Create(buffer, buffer_id);
}
+
+void PpbBufferAllocator::Release(uint32_t buffer_id) {
+ if (!buffer_id)
+ return;
+
+ AllocatedBufferMap::iterator found = allocated_buffers_.find(buffer_id);
+ if (found == allocated_buffers_.end())
+ return;
+
+ pp::Buffer_Dev& buffer = found->second;
+ free_buffers_.insert(
+ std::make_pair(buffer.size(), std::make_pair(buffer_id, buffer)));
+
+ allocated_buffers_.erase(found);
+}
+
+pp::Buffer_Dev PpbBufferAllocator::AllocateNewBuffer(uint32_t capacity) {
+ // Always pad new allocated buffer so that we don't need to reallocate
+ // buffers frequently if requested sizes fluctuate slightly.
+ static const uint32_t kBufferPadding = 512;
+
+ // Maximum number of free buffers we can keep when allocating new buffers.
+ static const uint32_t kFreeLimit = 3;
+
+ // Destroy the smallest buffer before allocating a new bigger buffer if the
+ // number of free buffers exceeds a limit. This mechanism helps avoid ending
+ // up with too many small buffers, which could happen if the size to be
+ // allocated keeps increasing.
+ if (free_buffers_.size() >= kFreeLimit)
+ free_buffers_.erase(free_buffers_.begin());
+
+ // Creation of pp::Buffer_Dev is expensive! It involves synchronous IPC calls.
+ // That's why we try to avoid AllocateNewBuffer() as much as we can.
+ return pp::Buffer_Dev(instance_, capacity + kBufferPadding);
+}
+
+VideoFrameImpl::VideoFrameImpl()
+ : format_(cdm::kUnknownVideoFormat),
+ frame_buffer_(NULL),
+ timestamp_(0) {
+ for (uint32_t i = 0; i < kMaxPlanes; ++i) {
+ plane_offsets_[i] = 0;
+ strides_[i] = 0;
+ }
+}
+
+VideoFrameImpl::~VideoFrameImpl() {
+ if (frame_buffer_)
+ frame_buffer_->Destroy();
+}
+
+} // namespace media
diff --git a/chromium/media/cdm/ppapi/cdm_helpers.h b/chromium/media/cdm/ppapi/cdm_helpers.h
new file mode 100644
index 00000000000..cb9203e105a
--- /dev/null
+++ b/chromium/media/cdm/ppapi/cdm_helpers.h
@@ -0,0 +1,230 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CDM_PPAPI_CDM_HELPERS_H_
+#define MEDIA_CDM_PPAPI_CDM_HELPERS_H_
+
+#include <map>
+#include <utility>
+
+#include "base/basictypes.h"
+#include "base/compiler_specific.h"
+#include "build/build_config.h"
+#include "media/cdm/ppapi/api/content_decryption_module.h"
+#include "ppapi/c/pp_errors.h"
+#include "ppapi/c/pp_stdint.h"
+#include "ppapi/cpp/dev/buffer_dev.h"
+#include "ppapi/cpp/instance.h"
+#include "ppapi/cpp/logging.h"
+
+namespace media {
+
// cdm::Buffer implementation that provides access to memory owned by a
// pp::Buffer_Dev.
// This class holds a reference to the Buffer_Dev throughout its lifetime.
// TODO(xhwang): Find a better name. It's confusing to have PpbBuffer,
// pp::Buffer_Dev and PPB_Buffer_Dev.
class PpbBuffer : public cdm::Buffer {
 public:
  // |buffer| must be a valid, non-empty buffer and |buffer_id| non-zero
  // (id 0 is the allocator's "no buffer" sentinel).
  static PpbBuffer* Create(const pp::Buffer_Dev& buffer, uint32_t buffer_id) {
    PP_DCHECK(buffer.data());
    PP_DCHECK(buffer.size());
    PP_DCHECK(buffer_id);
    return new PpbBuffer(buffer, buffer_id);
  }

  // cdm::Buffer implementation.
  virtual void Destroy() OVERRIDE { delete this; }

  virtual uint32_t Capacity() const OVERRIDE { return buffer_.size(); }

  virtual uint8_t* Data() OVERRIDE {
    return static_cast<uint8_t*>(buffer_.data());
  }

  virtual void SetSize(uint32_t size) OVERRIDE {
    PP_DCHECK(size <= Capacity());
    // In release builds an oversized request silently resets the logical
    // size to 0 instead of overrunning the buffer.
    if (size > Capacity()) {
      size_ = 0;
      return;
    }

    size_ = size;
  }

  // Logical data size; Capacity() is the physical allocation size.
  virtual uint32_t Size() const OVERRIDE { return size_; }

  // Returns a (ref-counted) copy of the underlying pepper buffer resource.
  pp::Buffer_Dev buffer_dev() const { return buffer_; }

  uint32_t buffer_id() const { return buffer_id_; }

 private:
  PpbBuffer(pp::Buffer_Dev buffer, uint32_t buffer_id)
      : buffer_(buffer),
        buffer_id_(buffer_id),
        size_(0) {}
  // Private: instances are created via Create() and freed via Destroy().
  virtual ~PpbBuffer() {}

  pp::Buffer_Dev buffer_;
  uint32_t buffer_id_;
  uint32_t size_;

  DISALLOW_COPY_AND_ASSIGN(PpbBuffer);
};
+
// Creates and recycles PpbBuffer objects backed by pp::Buffer_Dev for the
// CDM. Released buffers are kept in a free list and reused by capacity so
// the expensive pp::Buffer_Dev creation is amortized.
class PpbBufferAllocator {
 public:
  explicit PpbBufferAllocator(pp::Instance* instance)
      : instance_(instance),
        next_buffer_id_(1) {}  // Id 0 is reserved to mean "no buffer".
  ~PpbBufferAllocator() {}

  cdm::Buffer* Allocate(uint32_t capacity);

  // Releases the buffer with |buffer_id|. A buffer can be recycled after
  // it is released.
  void Release(uint32_t buffer_id);

 private:
  // Live buffers, keyed by buffer id.
  typedef std::map<uint32_t, pp::Buffer_Dev> AllocatedBufferMap;
  // Reusable buffers, keyed by capacity, mapped to (buffer id, buffer).
  typedef std::multimap<uint32_t, std::pair<uint32_t, pp::Buffer_Dev> >
      FreeBufferMap;

  pp::Buffer_Dev AllocateNewBuffer(uint32_t capacity);

  pp::Instance* const instance_;
  uint32_t next_buffer_id_;
  AllocatedBufferMap allocated_buffers_;
  FreeBufferMap free_buffers_;

  DISALLOW_COPY_AND_ASSIGN(PpbBufferAllocator);
};
+
// cdm::DecryptedBlock implementation that owns a PpbBuffer holding the
// decrypted data; the buffer (if set) is destroyed with the block.
class DecryptedBlockImpl : public cdm::DecryptedBlock {
 public:
  DecryptedBlockImpl() : buffer_(NULL), timestamp_(0) {}
  virtual ~DecryptedBlockImpl() { if (buffer_) buffer_->Destroy(); }

  // Takes ownership of |buffer|, which must be a PpbBuffer (or NULL).
  virtual void SetDecryptedBuffer(cdm::Buffer* buffer) OVERRIDE {
    buffer_ = static_cast<PpbBuffer*>(buffer);
  }
  virtual cdm::Buffer* DecryptedBuffer() OVERRIDE { return buffer_; }

  // Presentation timestamp in microseconds.
  virtual void SetTimestamp(int64_t timestamp) OVERRIDE {
    timestamp_ = timestamp;
  }
  virtual int64_t Timestamp() const OVERRIDE { return timestamp_; }

 private:
  PpbBuffer* buffer_;
  int64_t timestamp_;

  DISALLOW_COPY_AND_ASSIGN(DecryptedBlockImpl);
};
+
// cdm::VideoFrame implementation describing a planar (YUV) frame stored in a
// single PpbBuffer, with per-plane offsets and strides. Owns the buffer.
class VideoFrameImpl : public cdm::VideoFrame {
 public:
  VideoFrameImpl();
  virtual ~VideoFrameImpl();

  virtual void SetFormat(cdm::VideoFormat format) OVERRIDE {
    format_ = format;
  }
  virtual cdm::VideoFormat Format() const OVERRIDE { return format_; }

  virtual void SetSize(cdm::Size size) OVERRIDE { size_ = size; }
  virtual cdm::Size Size() const OVERRIDE { return size_; }

  // Takes ownership of |frame_buffer|, which must be a PpbBuffer (or NULL).
  virtual void SetFrameBuffer(cdm::Buffer* frame_buffer) OVERRIDE {
    frame_buffer_ = static_cast<PpbBuffer*>(frame_buffer);
  }
  virtual cdm::Buffer* FrameBuffer() OVERRIDE { return frame_buffer_; }

  virtual void SetPlaneOffset(cdm::VideoFrame::VideoPlane plane,
                              uint32_t offset) OVERRIDE {
    PP_DCHECK(plane < kMaxPlanes);
    plane_offsets_[plane] = offset;
  }
  virtual uint32_t PlaneOffset(VideoPlane plane) OVERRIDE {
    PP_DCHECK(plane < kMaxPlanes);
    return plane_offsets_[plane];
  }

  virtual void SetStride(VideoPlane plane, uint32_t stride) OVERRIDE {
    PP_DCHECK(plane < kMaxPlanes);
    strides_[plane] = stride;
  }
  virtual uint32_t Stride(VideoPlane plane) OVERRIDE {
    PP_DCHECK(plane < kMaxPlanes);
    return strides_[plane];
  }

  virtual void SetTimestamp(int64_t timestamp) OVERRIDE {
    timestamp_ = timestamp;
  }
  virtual int64_t Timestamp() const OVERRIDE { return timestamp_; }

 private:
  // The video buffer format.
  cdm::VideoFormat format_;

  // Width and height of the video frame.
  cdm::Size size_;

  // The video frame buffer.
  PpbBuffer* frame_buffer_;

  // Array of data pointers to each plane in the video frame buffer.
  uint32_t plane_offsets_[kMaxPlanes];

  // Array of strides for each plane, typically greater or equal to the width
  // of the surface divided by the horizontal sampling period. Note that
  // strides can be negative.
  uint32_t strides_[kMaxPlanes];

  // Presentation timestamp in microseconds.
  int64_t timestamp_;

  DISALLOW_COPY_AND_ASSIGN(VideoFrameImpl);
};
+
// cdm::AudioFrames implementation (both the v1 and v2 interfaces) that owns
// a PpbBuffer of decoded audio data, unless ownership is passed away via
// PassFrameBuffer().
class AudioFramesImpl : public cdm::AudioFrames_1,
                        public cdm::AudioFrames_2 {
 public:
  AudioFramesImpl() : buffer_(NULL), format_(cdm::kUnknownAudioFormat) {}
  virtual ~AudioFramesImpl() {
    if (buffer_)
      buffer_->Destroy();
  }

  // AudioFrames implementation.
  // Takes ownership of |buffer|, which must be a PpbBuffer (or NULL).
  virtual void SetFrameBuffer(cdm::Buffer* buffer) OVERRIDE {
    buffer_ = static_cast<PpbBuffer*>(buffer);
  }
  virtual cdm::Buffer* FrameBuffer() OVERRIDE {
    return buffer_;
  }
  virtual void SetFormat(cdm::AudioFormat format) OVERRIDE {
    format_ = format;
  }
  virtual cdm::AudioFormat Format() const OVERRIDE {
    return format_;
  }

  // Transfers ownership of the buffer to the caller; this object no longer
  // destroys it. Returns NULL if no buffer was set.
  cdm::Buffer* PassFrameBuffer() {
    PpbBuffer* temp_buffer = buffer_;
    buffer_ = NULL;
    return temp_buffer;
  }

 private:
  PpbBuffer* buffer_;
  cdm::AudioFormat format_;

  DISALLOW_COPY_AND_ASSIGN(AudioFramesImpl);
};
+
+} // namespace media
+
+#endif // MEDIA_CDM_PPAPI_CDM_HELPERS_H_
diff --git a/chromium/media/cdm/ppapi/cdm_logging.cc b/chromium/media/cdm/ppapi/cdm_logging.cc
new file mode 100644
index 00000000000..ff05930d9c7
--- /dev/null
+++ b/chromium/media/cdm/ppapi/cdm_logging.cc
@@ -0,0 +1,137 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Only compile this file in debug build. This gives us one more level of
+// protection that if the linker tries to link in strings/symbols appended to
+// "DLOG() <<" in release build (which it shouldn't), we'll get "undefined
+// reference" errors.
+#if !defined(NDEBUG)
+
+#include "media/cdm/ppapi/cdm_logging.h"
+
+#include "base/basictypes.h"
+
+#if defined(OS_WIN)
+#include <io.h>
+#include <windows.h>
+#elif defined(OS_MACOSX)
+#include <mach/mach.h>
+#include <mach/mach_time.h>
+#include <mach-o/dyld.h>
+#elif defined(OS_POSIX)
+#include <sys/syscall.h>
+#include <time.h>
+#endif
+
+#if defined(OS_POSIX)
+#include <errno.h>
+#include <pthread.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include <iomanip>
+#include <string>
+
+namespace media {
+
+namespace {
+
+// Helper functions to wrap platform differences.
+
// Returns the id of the current process for the log-message prefix.
int32 CurrentProcessId() {
#if defined(OS_WIN)
  return GetCurrentProcessId();
#elif defined(OS_POSIX)
  return getpid();
#endif
}
+
// Returns an id for the current thread for the log-message prefix.
int32 CurrentThreadId() {
  // Pthreads doesn't have the concept of a thread ID, so we have to reach down
  // into the kernel.
#if defined(OS_LINUX)
  return syscall(__NR_gettid);
#elif defined(OS_ANDROID)
  return gettid();
#elif defined(OS_SOLARIS)
  return pthread_self();
#elif defined(OS_POSIX)
  // NOTE(review): the intermediate int64 is truncated to the int32 return
  // type; acceptable for a debug log tag, but the value is not a real tid.
  return reinterpret_cast<int64>(pthread_self());
#elif defined(OS_WIN)
  return static_cast<int32>(::GetCurrentThreadId());
#endif
}
+
// Returns a monotonic tick value for the log-message prefix. The unit is
// platform-dependent: milliseconds on Windows, Mach absolute-time units on
// Mac, microseconds on other POSIX systems.
uint64 TickCount() {
#if defined(OS_WIN)
  return GetTickCount();
#elif defined(OS_MACOSX)
  return mach_absolute_time();
#elif defined(OS_POSIX)
  struct timespec ts;
  clock_gettime(CLOCK_MONOTONIC, &ts);

  uint64 absolute_micro =
      static_cast<int64>(ts.tv_sec) * 1000000 +
      static_cast<int64>(ts.tv_nsec) / 1000;

  return absolute_micro;
#endif
}
+
+} // namespace
+
// Builds the "[pid:tid:MMDD/HHMMSS:ticks:file(line)] " prefix that CDM_DLOG()
// prepends to each log line; |file| is reduced to its basename.
CdmLogMessage::CdmLogMessage(const char* file, int line) {
  std::string filename(file);
  // Handle both separator styles since __FILE__ may contain either.
  size_t last_slash_pos = filename.find_last_of("\\/");
  if (last_slash_pos != std::string::npos)
    filename = filename.substr(last_slash_pos + 1);

  stream_ << '[';

  // Process and thread ID.
  stream_ << CurrentProcessId() << ':';
  stream_ << CurrentThreadId() << ':';

  // Time and tick count.
  time_t t = time(NULL);
  struct tm local_time = {0};
#if _MSC_VER >= 1400
  // MSVC deprecates localtime_r-style APIs in favor of localtime_s.
  localtime_s(&local_time, &t);
#else
  localtime_r(&t, &local_time);
#endif
  struct tm* tm_time = &local_time;
  stream_ << std::setfill('0')
          << std::setw(2) << 1 + tm_time->tm_mon
          << std::setw(2) << tm_time->tm_mday
          << '/'
          << std::setw(2) << tm_time->tm_hour
          << std::setw(2) << tm_time->tm_min
          << std::setw(2) << tm_time->tm_sec
          << ':';
  stream_ << TickCount() << ':';

  // File name.
  stream_ << filename << "(" << line << ")] ";
}
+
// Terminates the log line once the CDM_DLOG() statement completes.
CdmLogMessage::~CdmLogMessage() {
  // Use std::cout explicitly for the line break. This limits the use of this
  // class only to the definition of DLOG() (which also uses std::cout).
  //
  // This appends "std::endl" after all other messages appended to DLOG(),
  // which relies on the C++ standard ISO/IEC 14882:1998(E) $12.2.3:
  // "Temporary objects are destroyed as the last step in evaluating the
  // full-expression (1.9) that (lexically) contains the point where they were
  // created."
  std::cout << std::endl;
}
+
+} // namespace media
+
+#endif // !defined(NDEBUG)
diff --git a/chromium/media/cdm/ppapi/cdm_logging.h b/chromium/media/cdm/ppapi/cdm_logging.h
new file mode 100644
index 00000000000..a7059182ff7
--- /dev/null
+++ b/chromium/media/cdm/ppapi/cdm_logging.h
@@ -0,0 +1,66 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file defines useful logging macros/methods for CDM adapter.
+
+#ifndef MEDIA_CDM_PPAPI_CDM_LOGGING_H_
+#define MEDIA_CDM_PPAPI_CDM_LOGGING_H_
+
+#include <iostream>
+#include <sstream>
+#include <string>
+
+namespace media {
+
+namespace {
+
+// The following classes/macros are adapted from base/logging.h.
+
+// This class is used to explicitly ignore values in the conditional
+// logging macros. This avoids compiler warnings like "value computed
+// is not used" and "statement has no effect".
+class LogMessageVoidify {
+ public:
+ LogMessageVoidify() {}
+ // This has to be an operator with a precedence lower than << but
+ // higher than ?:
+ void operator&(std::ostream&) {}
+};
+
+} // namespace
+
+// This class serves two purposes:
+// (1) It adds common headers to the log message, e.g. timestamp, process ID.
+// (2) It adds a line break at the end of the log message.
+// This class is copied and modified from base/logging.* but is quite different
+// in terms of how things work. This class is designed to work only with the
+// CDM_DLOG() defined below and should not be used for other purposes.
+class CdmLogMessage {
+ public:
+ CdmLogMessage(const char* file, int line);
+ ~CdmLogMessage();
+
+ std::string message() { return stream_.str(); }
+
+ private:
+ std::ostringstream stream_;
+};
+
+// Helper macro which avoids evaluating the arguments to a stream if
+// the condition doesn't hold.
+#define CDM_LAZY_STREAM(stream, condition) \
+ !(condition) ? (void) 0 : LogMessageVoidify() & (stream)
+
+#define CDM_DLOG() CDM_LAZY_STREAM(std::cout, CDM_DLOG_IS_ON()) \
+ << CdmLogMessage(__FILE__, __LINE__).message()
+
+#if defined(NDEBUG)
+#define CDM_DLOG_IS_ON() false
+#else
+#define CDM_DLOG_IS_ON() true
+#endif
+
+} // namespace media
+
+#endif // MEDIA_CDM_PPAPI_CDM_LOGGING_H_
diff --git a/chromium/media/cdm/ppapi/cdm_video_decoder.cc b/chromium/media/cdm/ppapi/cdm_video_decoder.cc
index 95523d4c78d..0477c0a3835 100644
--- a/chromium/media/cdm/ppapi/cdm_video_decoder.cc
+++ b/chromium/media/cdm/ppapi/cdm_video_decoder.cc
@@ -21,7 +21,7 @@
namespace media {
scoped_ptr<CdmVideoDecoder> CreateVideoDecoder(
- cdm::Host* host, const cdm::VideoDecoderConfig& config) {
+ ClearKeyCdmHost* host, const cdm::VideoDecoderConfig& config) {
scoped_ptr<CdmVideoDecoder> video_decoder;
#if defined(CLEAR_KEY_CDM_USE_FAKE_VIDEO_DECODER)
video_decoder.reset(new FakeCdmVideoDecoder(host));
diff --git a/chromium/media/cdm/ppapi/cdm_video_decoder.h b/chromium/media/cdm/ppapi/cdm_video_decoder.h
index 25d48f46b92..3eefb63584d 100644
--- a/chromium/media/cdm/ppapi/cdm_video_decoder.h
+++ b/chromium/media/cdm/ppapi/cdm_video_decoder.h
@@ -8,6 +8,7 @@
#include "base/basictypes.h"
#include "base/memory/scoped_ptr.h"
#include "media/cdm/ppapi/api/content_decryption_module.h"
+#include "media/cdm/ppapi/clear_key_cdm_common.h"
namespace media {
@@ -36,7 +37,7 @@ class CdmVideoDecoder {
// |config.codec|. Returns a scoped_ptr containing a non-null initialized
// CdmVideoDecoder* upon success.
scoped_ptr<CdmVideoDecoder> CreateVideoDecoder(
- cdm::Host* host, const cdm::VideoDecoderConfig& config);
+ ClearKeyCdmHost* host, const cdm::VideoDecoderConfig& config);
} // namespace media
diff --git a/chromium/media/cdm/ppapi/cdm_wrapper.cc b/chromium/media/cdm/ppapi/cdm_wrapper.cc
deleted file mode 100644
index 66ae43d4d65..00000000000
--- a/chromium/media/cdm/ppapi/cdm_wrapper.cc
+++ /dev/null
@@ -1,1196 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <cstring>
-#include <map>
-#include <string>
-#include <utility>
-#include <vector>
-
-#include "base/basictypes.h"
-#include "base/compiler_specific.h"
-#include "media/cdm/ppapi/api/content_decryption_module.h"
-#include "media/cdm/ppapi/linked_ptr.h"
-#include "ppapi/c/pp_errors.h"
-#include "ppapi/c/pp_stdint.h"
-#include "ppapi/c/private/pp_content_decryptor.h"
-#include "ppapi/cpp/completion_callback.h"
-#include "ppapi/cpp/core.h"
-#include "ppapi/cpp/dev/buffer_dev.h"
-#include "ppapi/cpp/instance.h"
-#include "ppapi/cpp/logging.h"
-#include "ppapi/cpp/module.h"
-#include "ppapi/cpp/pass_ref.h"
-#include "ppapi/cpp/private/content_decryptor_private.h"
-#include "ppapi/cpp/resource.h"
-#include "ppapi/cpp/var.h"
-#include "ppapi/cpp/var_array_buffer.h"
-#include "ppapi/utility/completion_callback_factory.h"
-
-#if defined(CHECK_DOCUMENT_URL)
-#include "ppapi/cpp/dev/url_util_dev.h"
-#include "ppapi/cpp/instance_handle.h"
-#endif // defined(CHECK_DOCUMENT_URL)
-
-namespace {
-
-bool IsMainThread() {
- return pp::Module::Get()->core()->IsMainThread();
-}
-
-// Posts a task to run |cb| on the main thread. The task is posted even if the
-// current thread is the main thread.
-void PostOnMain(pp::CompletionCallback cb) {
- pp::Module::Get()->core()->CallOnMainThread(0, cb, PP_OK);
-}
-
-// Ensures |cb| is called on the main thread, either because the current thread
-// is the main thread or by posting it to the main thread.
-void CallOnMain(pp::CompletionCallback cb) {
- // TODO(tomfinegan): This is only necessary because PPAPI doesn't allow calls
- // off the main thread yet. Remove this once the change lands.
- if (IsMainThread())
- cb.Run(PP_OK);
- else
- PostOnMain(cb);
-}
-
-// Configures a cdm::InputBuffer. |subsamples| must exist as long as
-// |input_buffer| is in use.
-void ConfigureInputBuffer(
- const pp::Buffer_Dev& encrypted_buffer,
- const PP_EncryptedBlockInfo& encrypted_block_info,
- std::vector<cdm::SubsampleEntry>* subsamples,
- cdm::InputBuffer* input_buffer) {
- PP_DCHECK(subsamples);
- PP_DCHECK(!encrypted_buffer.is_null());
-
- input_buffer->data = static_cast<uint8_t*>(encrypted_buffer.data());
- input_buffer->data_size = encrypted_block_info.data_size;
- PP_DCHECK(encrypted_buffer.size() >=
- static_cast<uint32_t>(input_buffer->data_size));
- input_buffer->data_offset = encrypted_block_info.data_offset;
-
- PP_DCHECK(encrypted_block_info.key_id_size <=
- arraysize(encrypted_block_info.key_id));
- input_buffer->key_id_size = encrypted_block_info.key_id_size;
- input_buffer->key_id = input_buffer->key_id_size > 0 ?
- encrypted_block_info.key_id : NULL;
-
- PP_DCHECK(encrypted_block_info.iv_size <= arraysize(encrypted_block_info.iv));
- input_buffer->iv_size = encrypted_block_info.iv_size;
- input_buffer->iv = encrypted_block_info.iv_size > 0 ?
- encrypted_block_info.iv : NULL;
-
- input_buffer->num_subsamples = encrypted_block_info.num_subsamples;
- if (encrypted_block_info.num_subsamples > 0) {
- subsamples->reserve(encrypted_block_info.num_subsamples);
-
- for (uint32_t i = 0; i < encrypted_block_info.num_subsamples; ++i) {
- subsamples->push_back(cdm::SubsampleEntry(
- encrypted_block_info.subsamples[i].clear_bytes,
- encrypted_block_info.subsamples[i].cipher_bytes));
- }
-
- input_buffer->subsamples = &(*subsamples)[0];
- }
-
- input_buffer->timestamp = encrypted_block_info.tracking_info.timestamp;
-}
-
-PP_DecryptResult CdmStatusToPpDecryptResult(cdm::Status status) {
- switch (status) {
- case cdm::kSuccess:
- return PP_DECRYPTRESULT_SUCCESS;
- case cdm::kNoKey:
- return PP_DECRYPTRESULT_DECRYPT_NOKEY;
- case cdm::kNeedMoreData:
- return PP_DECRYPTRESULT_NEEDMOREDATA;
- case cdm::kDecryptError:
- return PP_DECRYPTRESULT_DECRYPT_ERROR;
- case cdm::kDecodeError:
- return PP_DECRYPTRESULT_DECODE_ERROR;
- default:
- PP_NOTREACHED();
- return PP_DECRYPTRESULT_DECODE_ERROR;
- }
-}
-
-PP_DecryptedFrameFormat CdmVideoFormatToPpDecryptedFrameFormat(
- cdm::VideoFormat format) {
- switch (format) {
- case cdm::kYv12:
- return PP_DECRYPTEDFRAMEFORMAT_YV12;
- case cdm::kI420:
- return PP_DECRYPTEDFRAMEFORMAT_I420;
- default:
- return PP_DECRYPTEDFRAMEFORMAT_UNKNOWN;
- }
-}
-
-cdm::AudioDecoderConfig::AudioCodec PpAudioCodecToCdmAudioCodec(
- PP_AudioCodec codec) {
- switch (codec) {
- case PP_AUDIOCODEC_VORBIS:
- return cdm::AudioDecoderConfig::kCodecVorbis;
- case PP_AUDIOCODEC_AAC:
- return cdm::AudioDecoderConfig::kCodecAac;
- default:
- return cdm::AudioDecoderConfig::kUnknownAudioCodec;
- }
-}
-
-cdm::VideoDecoderConfig::VideoCodec PpVideoCodecToCdmVideoCodec(
- PP_VideoCodec codec) {
- switch (codec) {
- case PP_VIDEOCODEC_VP8:
- return cdm::VideoDecoderConfig::kCodecVp8;
- case PP_VIDEOCODEC_H264:
- return cdm::VideoDecoderConfig::kCodecH264;
- default:
- return cdm::VideoDecoderConfig::kUnknownVideoCodec;
- }
-}
-
-cdm::VideoDecoderConfig::VideoCodecProfile PpVCProfileToCdmVCProfile(
- PP_VideoCodecProfile profile) {
- switch (profile) {
- case PP_VIDEOCODECPROFILE_VP8_MAIN:
- return cdm::VideoDecoderConfig::kVp8ProfileMain;
- case PP_VIDEOCODECPROFILE_H264_BASELINE:
- return cdm::VideoDecoderConfig::kH264ProfileBaseline;
- case PP_VIDEOCODECPROFILE_H264_MAIN:
- return cdm::VideoDecoderConfig::kH264ProfileMain;
- case PP_VIDEOCODECPROFILE_H264_EXTENDED:
- return cdm::VideoDecoderConfig::kH264ProfileExtended;
- case PP_VIDEOCODECPROFILE_H264_HIGH:
- return cdm::VideoDecoderConfig::kH264ProfileHigh;
- case PP_VIDEOCODECPROFILE_H264_HIGH_10:
- return cdm::VideoDecoderConfig::kH264ProfileHigh10;
- case PP_VIDEOCODECPROFILE_H264_HIGH_422:
- return cdm::VideoDecoderConfig::kH264ProfileHigh422;
- case PP_VIDEOCODECPROFILE_H264_HIGH_444_PREDICTIVE:
- return cdm::VideoDecoderConfig::kH264ProfileHigh444Predictive;
- default:
- return cdm::VideoDecoderConfig::kUnknownVideoCodecProfile;
- }
-}
-
-cdm::VideoFormat PpDecryptedFrameFormatToCdmVideoFormat(
- PP_DecryptedFrameFormat format) {
- switch (format) {
- case PP_DECRYPTEDFRAMEFORMAT_YV12:
- return cdm::kYv12;
- case PP_DECRYPTEDFRAMEFORMAT_I420:
- return cdm::kI420;
- default:
- return cdm::kUnknownVideoFormat;
- }
-}
-
-cdm::StreamType PpDecryptorStreamTypeToCdmStreamType(
- PP_DecryptorStreamType stream_type) {
- switch (stream_type) {
- case PP_DECRYPTORSTREAMTYPE_AUDIO:
- return cdm::kStreamTypeAudio;
- case PP_DECRYPTORSTREAMTYPE_VIDEO:
- return cdm::kStreamTypeVideo;
- }
-
- PP_NOTREACHED();
- return cdm::kStreamTypeVideo;
-}
-
-} // namespace
-
-namespace media {
-
-// cdm::Buffer implementation that provides access to memory owned by a
-// pp::Buffer_Dev.
-// This class holds a reference to the Buffer_Dev throughout its lifetime.
-// TODO(xhwang): Find a better name. It's confusing to have PpbBuffer,
-// pp::Buffer_Dev and PPB_Buffer_Dev.
-class PpbBuffer : public cdm::Buffer {
- public:
- static PpbBuffer* Create(const pp::Buffer_Dev& buffer, uint32_t buffer_id) {
- PP_DCHECK(buffer.data());
- PP_DCHECK(buffer.size());
- PP_DCHECK(buffer_id);
- return new PpbBuffer(buffer, buffer_id);
- }
-
- // cdm::Buffer implementation.
- virtual void Destroy() OVERRIDE { delete this; }
-
- virtual int32_t Capacity() const OVERRIDE { return buffer_.size(); }
-
- virtual uint8_t* Data() OVERRIDE {
- return static_cast<uint8_t*>(buffer_.data());
- }
-
- virtual void SetSize(int32_t size) OVERRIDE {
- PP_DCHECK(size >= 0);
- PP_DCHECK(size < Capacity());
- if (size < 0 || size > Capacity()) {
- size_ = 0;
- return;
- }
-
- size_ = size;
- }
-
- virtual int32_t Size() const OVERRIDE { return size_; }
-
- pp::Buffer_Dev buffer_dev() const { return buffer_; }
-
- uint32_t buffer_id() const { return buffer_id_; }
-
- private:
- PpbBuffer(pp::Buffer_Dev buffer, uint32_t buffer_id)
- : buffer_(buffer),
- buffer_id_(buffer_id),
- size_(0) {}
- virtual ~PpbBuffer() {}
-
- pp::Buffer_Dev buffer_;
- uint32_t buffer_id_;
- int32_t size_;
-
- DISALLOW_COPY_AND_ASSIGN(PpbBuffer);
-};
-
-class PpbBufferAllocator {
- public:
- explicit PpbBufferAllocator(pp::Instance* instance)
- : instance_(instance),
- next_buffer_id_(1) {}
- ~PpbBufferAllocator() {}
-
- cdm::Buffer* Allocate(int32_t capacity);
-
- // Releases the buffer with |buffer_id|. A buffer can be recycled after
- // it is released.
- void Release(uint32_t buffer_id);
-
- private:
- typedef std::map<uint32_t, pp::Buffer_Dev> AllocatedBufferMap;
- typedef std::multimap<int, std::pair<uint32_t, pp::Buffer_Dev> >
- FreeBufferMap;
-
- // Always pad new allocated buffer so that we don't need to reallocate
- // buffers frequently if requested sizes fluctuate slightly.
- static const int kBufferPadding = 512;
-
- // Maximum number of free buffers we can keep when allocating new buffers.
- static const int kFreeLimit = 3;
-
- pp::Buffer_Dev AllocateNewBuffer(int capacity);
-
- pp::Instance* const instance_;
- uint32_t next_buffer_id_;
- AllocatedBufferMap allocated_buffers_;
- FreeBufferMap free_buffers_;
-
- DISALLOW_COPY_AND_ASSIGN(PpbBufferAllocator);
-};
-
-cdm::Buffer* PpbBufferAllocator::Allocate(int32_t capacity) {
- PP_DCHECK(IsMainThread());
-
- if (capacity <= 0)
- return NULL;
-
- pp::Buffer_Dev buffer;
- uint32_t buffer_id = 0;
-
- // Reuse a buffer in the free list if there is one that fits |capacity|.
- // Otherwise, create a new one.
- FreeBufferMap::iterator found = free_buffers_.lower_bound(capacity);
- if (found == free_buffers_.end()) {
- // TODO(xhwang): Report statistics about how many new buffers are allocated.
- buffer = AllocateNewBuffer(capacity);
- if (buffer.is_null())
- return NULL;
- buffer_id = next_buffer_id_++;
- } else {
- buffer = found->second.second;
- buffer_id = found->second.first;
- free_buffers_.erase(found);
- }
-
- allocated_buffers_.insert(std::make_pair(buffer_id, buffer));
-
- return PpbBuffer::Create(buffer, buffer_id);
-}
-
-void PpbBufferAllocator::Release(uint32_t buffer_id) {
- if (!buffer_id)
- return;
-
- AllocatedBufferMap::iterator found = allocated_buffers_.find(buffer_id);
- if (found == allocated_buffers_.end())
- return;
-
- pp::Buffer_Dev& buffer = found->second;
- free_buffers_.insert(
- std::make_pair(buffer.size(), std::make_pair(buffer_id, buffer)));
-
- allocated_buffers_.erase(found);
-}
-
-pp::Buffer_Dev PpbBufferAllocator::AllocateNewBuffer(int32_t capacity) {
- // Destroy the smallest buffer before allocating a new bigger buffer if the
- // number of free buffers exceeds a limit. This mechanism helps avoid ending
- // up with too many small buffers, which could happen if the size to be
- // allocated keeps increasing.
- if (free_buffers_.size() >= static_cast<uint32_t>(kFreeLimit))
- free_buffers_.erase(free_buffers_.begin());
-
- // Creation of pp::Buffer_Dev is expensive! It involves synchronous IPC calls.
- // That's why we try to avoid AllocateNewBuffer() as much as we can.
- return pp::Buffer_Dev(instance_, capacity + kBufferPadding);
-}
-
-class DecryptedBlockImpl : public cdm::DecryptedBlock {
- public:
- DecryptedBlockImpl() : buffer_(NULL), timestamp_(0) {}
- virtual ~DecryptedBlockImpl() { if (buffer_) buffer_->Destroy(); }
-
- virtual void SetDecryptedBuffer(cdm::Buffer* buffer) OVERRIDE {
- buffer_ = static_cast<PpbBuffer*>(buffer);
- }
- virtual cdm::Buffer* DecryptedBuffer() OVERRIDE { return buffer_; }
-
- virtual void SetTimestamp(int64_t timestamp) OVERRIDE {
- timestamp_ = timestamp;
- }
- virtual int64_t Timestamp() const OVERRIDE { return timestamp_; }
-
- private:
- PpbBuffer* buffer_;
- int64_t timestamp_;
-
- DISALLOW_COPY_AND_ASSIGN(DecryptedBlockImpl);
-};
-
-class VideoFrameImpl : public cdm::VideoFrame {
- public:
- VideoFrameImpl();
- virtual ~VideoFrameImpl();
-
- virtual void SetFormat(cdm::VideoFormat format) OVERRIDE {
- format_ = format;
- }
- virtual cdm::VideoFormat Format() const OVERRIDE { return format_; }
-
- virtual void SetSize(cdm::Size size) OVERRIDE { size_ = size; }
- virtual cdm::Size Size() const OVERRIDE { return size_; }
-
- virtual void SetFrameBuffer(cdm::Buffer* frame_buffer) OVERRIDE {
- frame_buffer_ = static_cast<PpbBuffer*>(frame_buffer);
- }
- virtual cdm::Buffer* FrameBuffer() OVERRIDE { return frame_buffer_; }
-
- virtual void SetPlaneOffset(cdm::VideoFrame::VideoPlane plane,
- int32_t offset) OVERRIDE {
- PP_DCHECK(0 <= plane && plane < kMaxPlanes);
- PP_DCHECK(offset >= 0);
- plane_offsets_[plane] = offset;
- }
- virtual int32_t PlaneOffset(VideoPlane plane) OVERRIDE {
- PP_DCHECK(0 <= plane && plane < kMaxPlanes);
- return plane_offsets_[plane];
- }
-
- virtual void SetStride(VideoPlane plane, int32_t stride) OVERRIDE {
- PP_DCHECK(0 <= plane && plane < kMaxPlanes);
- strides_[plane] = stride;
- }
- virtual int32_t Stride(VideoPlane plane) OVERRIDE {
- PP_DCHECK(0 <= plane && plane < kMaxPlanes);
- return strides_[plane];
- }
-
- virtual void SetTimestamp(int64_t timestamp) OVERRIDE {
- timestamp_ = timestamp;
- }
- virtual int64_t Timestamp() const OVERRIDE { return timestamp_; }
-
- private:
- // The video buffer format.
- cdm::VideoFormat format_;
-
- // Width and height of the video frame.
- cdm::Size size_;
-
- // The video frame buffer.
- PpbBuffer* frame_buffer_;
-
- // Array of data pointers to each plane in the video frame buffer.
- int32_t plane_offsets_[kMaxPlanes];
-
- // Array of strides for each plane, typically greater or equal to the width
- // of the surface divided by the horizontal sampling period. Note that
- // strides can be negative.
- int32_t strides_[kMaxPlanes];
-
- // Presentation timestamp in microseconds.
- int64_t timestamp_;
-
- DISALLOW_COPY_AND_ASSIGN(VideoFrameImpl);
-};
-
-VideoFrameImpl::VideoFrameImpl()
- : format_(cdm::kUnknownVideoFormat),
- frame_buffer_(NULL),
- timestamp_(0) {
- for (int32_t i = 0; i < kMaxPlanes; ++i) {
- plane_offsets_[i] = 0;
- strides_[i] = 0;
- }
-}
-
-VideoFrameImpl::~VideoFrameImpl() {
- if (frame_buffer_)
- frame_buffer_->Destroy();
-}
-
-class AudioFramesImpl : public cdm::AudioFrames {
- public:
- AudioFramesImpl() : buffer_(NULL) {}
- virtual ~AudioFramesImpl() {
- if (buffer_)
- buffer_->Destroy();
- }
-
- // AudioFrames implementation.
- virtual void SetFrameBuffer(cdm::Buffer* buffer) OVERRIDE {
- buffer_ = static_cast<PpbBuffer*>(buffer);
- }
- virtual cdm::Buffer* FrameBuffer() OVERRIDE {
- return buffer_;
- }
-
- private:
- PpbBuffer* buffer_;
-
- DISALLOW_COPY_AND_ASSIGN(AudioFramesImpl);
-};
-
-// GetCdmHostFunc implementation.
-void* GetCdmHost(int host_interface_version, void* user_data);
-
-// A wrapper class for abstracting away PPAPI interaction and threading for a
-// Content Decryption Module (CDM).
-class CdmWrapper : public pp::Instance,
- public pp::ContentDecryptor_Private,
- public cdm::Host {
- public:
- CdmWrapper(PP_Instance instance, pp::Module* module);
- virtual ~CdmWrapper();
-
- // pp::Instance implementation.
- virtual bool Init(uint32_t argc, const char* argn[], const char* argv[]) {
- return true;
- }
-
- // PPP_ContentDecryptor_Private implementation.
- // Note: Results of calls to these methods must be reported through the
- // PPB_ContentDecryptor_Private interface.
- virtual void Initialize(const std::string& key_system,
- bool can_challenge_platform) OVERRIDE;
- virtual void GenerateKeyRequest(const std::string& type,
- pp::VarArrayBuffer init_data) OVERRIDE;
- virtual void AddKey(const std::string& session_id,
- pp::VarArrayBuffer key,
- pp::VarArrayBuffer init_data) OVERRIDE;
- virtual void CancelKeyRequest(const std::string& session_id) OVERRIDE;
- virtual void Decrypt(
- pp::Buffer_Dev encrypted_buffer,
- const PP_EncryptedBlockInfo& encrypted_block_info) OVERRIDE;
- virtual void InitializeAudioDecoder(
- const PP_AudioDecoderConfig& decoder_config,
- pp::Buffer_Dev extra_data_buffer) OVERRIDE;
- virtual void InitializeVideoDecoder(
- const PP_VideoDecoderConfig& decoder_config,
- pp::Buffer_Dev extra_data_buffer) OVERRIDE;
- virtual void DeinitializeDecoder(PP_DecryptorStreamType decoder_type,
- uint32_t request_id) OVERRIDE;
- virtual void ResetDecoder(PP_DecryptorStreamType decoder_type,
- uint32_t request_id) OVERRIDE;
- virtual void DecryptAndDecode(
- PP_DecryptorStreamType decoder_type,
- pp::Buffer_Dev encrypted_buffer,
- const PP_EncryptedBlockInfo& encrypted_block_info) OVERRIDE;
-
- // cdm::Host implementation.
- virtual cdm::Buffer* Allocate(int32_t capacity) OVERRIDE;
- virtual void SetTimer(int64_t delay_ms, void* context) OVERRIDE;
- virtual double GetCurrentWallTimeInSeconds() OVERRIDE;
- virtual void SendKeyMessage(
- const char* session_id, int32_t session_id_length,
- const char* message, int32_t message_length,
- const char* default_url, int32_t default_url_length) OVERRIDE;
- virtual void SendKeyError(const char* session_id,
- int32_t session_id_length,
- cdm::MediaKeyError error_code,
- uint32_t system_code) OVERRIDE;
- virtual void GetPrivateData(int32_t* instance,
- GetPrivateInterface* get_interface) OVERRIDE;
-
- private:
- struct SessionInfo {
- SessionInfo(const std::string& key_system_in,
- const std::string& session_id_in)
- : key_system(key_system_in),
- session_id(session_id_in) {}
- const std::string key_system;
- const std::string session_id;
- };
-
- typedef linked_ptr<DecryptedBlockImpl> LinkedDecryptedBlock;
- typedef linked_ptr<VideoFrameImpl> LinkedVideoFrame;
- typedef linked_ptr<AudioFramesImpl> LinkedAudioFrames;
-
- bool CreateCdmInstance(const std::string& key_system);
-
- void SendUnknownKeyError(const std::string& key_system,
- const std::string& session_id);
-
- void SendKeyAdded(const std::string& key_system,
- const std::string& session_id);
-
- void SendKeyErrorInternal(const std::string& key_system,
- const std::string& session_id,
- cdm::MediaKeyError error_code,
- uint32_t system_code);
-
- // <code>PPB_ContentDecryptor_Private</code> dispatchers. These are passed to
- // <code>callback_factory_</code> to ensure that calls into
- // <code>PPP_ContentDecryptor_Private</code> are asynchronous.
- void KeyAdded(int32_t result, const SessionInfo& session_info);
- void KeyMessage(int32_t result,
- const SessionInfo& session_info,
- const std::vector<uint8>& message,
- const std::string& default_url);
- void KeyError(int32_t result,
- const SessionInfo& session_info,
- cdm::MediaKeyError error_code,
- uint32_t system_code);
- void DeliverBlock(int32_t result,
- const cdm::Status& status,
- const LinkedDecryptedBlock& decrypted_block,
- const PP_DecryptTrackingInfo& tracking_info);
- void DecoderInitializeDone(int32_t result,
- PP_DecryptorStreamType decoder_type,
- uint32_t request_id,
- bool success);
- void DecoderDeinitializeDone(int32_t result,
- PP_DecryptorStreamType decoder_type,
- uint32_t request_id);
- void DecoderResetDone(int32_t result,
- PP_DecryptorStreamType decoder_type,
- uint32_t request_id);
- void DeliverFrame(int32_t result,
- const cdm::Status& status,
- const LinkedVideoFrame& video_frame,
- const PP_DecryptTrackingInfo& tracking_info);
- void DeliverSamples(int32_t result,
- const cdm::Status& status,
- const LinkedAudioFrames& audio_frames,
- const PP_DecryptTrackingInfo& tracking_info);
-
- // Helper for SetTimer().
- void TimerExpired(int32_t result, void* context);
-
- bool IsValidVideoFrame(const LinkedVideoFrame& video_frame);
-
- PpbBufferAllocator allocator_;
- pp::CompletionCallbackFactory<CdmWrapper> callback_factory_;
- cdm::ContentDecryptionModule* cdm_;
- std::string key_system_;
-
- DISALLOW_COPY_AND_ASSIGN(CdmWrapper);
-};
-
-CdmWrapper::CdmWrapper(PP_Instance instance, pp::Module* module)
- : pp::Instance(instance),
- pp::ContentDecryptor_Private(this),
- allocator_(this),
- cdm_(NULL) {
- callback_factory_.Initialize(this);
-}
-
-CdmWrapper::~CdmWrapper() {
- if (cdm_)
- cdm_->Destroy();
-}
-
-bool CdmWrapper::CreateCdmInstance(const std::string& key_system) {
- PP_DCHECK(!cdm_);
- cdm_ = static_cast<cdm::ContentDecryptionModule*>(
- ::CreateCdmInstance(cdm::kCdmInterfaceVersion,
- key_system.data(), key_system.size(),
- GetCdmHost, this));
-
- return (cdm_ != NULL);
-}
-
-void CdmWrapper::Initialize(const std::string& key_system,
- bool can_challenge_platform) {
- PP_DCHECK(!key_system.empty());
- PP_DCHECK(key_system_.empty() || (key_system_ == key_system && cdm_));
-
- if (!cdm_) {
- if (!CreateCdmInstance(key_system)) {
- // TODO(jrummell): Is UnknownKeyError the correct response?
- SendUnknownKeyError(key_system, std::string());
- return;
- }
- }
- PP_DCHECK(cdm_);
- key_system_ = key_system;
-}
-
-void CdmWrapper::GenerateKeyRequest(const std::string& type,
- pp::VarArrayBuffer init_data) {
- PP_DCHECK(cdm_); // Initialize() should have succeeded.
-
-#if defined(CHECK_DOCUMENT_URL)
- PP_URLComponents_Dev url_components = {};
- pp::Var href = pp::URLUtil_Dev::Get()->GetDocumentURL(
- pp::InstanceHandle(pp_instance()), &url_components);
- PP_DCHECK(href.is_string());
- PP_DCHECK(!href.AsString().empty());
- PP_DCHECK(url_components.host.begin);
- PP_DCHECK(0 < url_components.host.len);
-#endif // defined(CHECK_DOCUMENT_URL)
-
- cdm::Status status = cdm_->GenerateKeyRequest(
- type.data(), type.size(),
- static_cast<const uint8_t*>(init_data.Map()),
- init_data.ByteLength());
- PP_DCHECK(status == cdm::kSuccess || status == cdm::kSessionError);
- if (status != cdm::kSuccess)
- SendUnknownKeyError(key_system_, std::string());
-}
-
-void CdmWrapper::AddKey(const std::string& session_id,
- pp::VarArrayBuffer key,
- pp::VarArrayBuffer init_data) {
- PP_DCHECK(cdm_); // Initialize() should have succeeded.
- if (!cdm_) {
- SendUnknownKeyError(key_system_, session_id);
- return;
- }
-
- const uint8_t* key_ptr = static_cast<const uint8_t*>(key.Map());
- int key_size = key.ByteLength();
- const uint8_t* init_data_ptr = static_cast<const uint8_t*>(init_data.Map());
- int init_data_size = init_data.ByteLength();
- PP_DCHECK(!init_data_ptr == !init_data_size);
-
- if (!key_ptr || key_size <= 0) {
- SendUnknownKeyError(key_system_, session_id);
- return;
- }
-
- cdm::Status status = cdm_->AddKey(session_id.data(), session_id.size(),
- key_ptr, key_size,
- init_data_ptr, init_data_size);
- PP_DCHECK(status == cdm::kSuccess || status == cdm::kSessionError);
- if (status != cdm::kSuccess) {
- SendUnknownKeyError(key_system_, session_id);
- return;
- }
-
- SendKeyAdded(key_system_, session_id);
-}
-
-void CdmWrapper::CancelKeyRequest(const std::string& session_id) {
- PP_DCHECK(cdm_); // Initialize() should have succeeded.
- if (!cdm_) {
- SendUnknownKeyError(key_system_, session_id);
- return;
- }
-
- cdm::Status status = cdm_->CancelKeyRequest(session_id.data(),
- session_id.size());
- PP_DCHECK(status == cdm::kSuccess || status == cdm::kSessionError);
- if (status != cdm::kSuccess)
- SendUnknownKeyError(key_system_, session_id);
-}
-
-// Note: In the following decryption/decoding related functions, errors are NOT
-// reported via KeyError, but are reported via corresponding PPB calls.
-
-void CdmWrapper::Decrypt(pp::Buffer_Dev encrypted_buffer,
- const PP_EncryptedBlockInfo& encrypted_block_info) {
- PP_DCHECK(cdm_); // Initialize() should have succeeded.
- PP_DCHECK(!encrypted_buffer.is_null());
-
- // Release a buffer that the caller indicated it is finished with.
- allocator_.Release(encrypted_block_info.tracking_info.buffer_id);
-
- cdm::Status status = cdm::kDecryptError;
- LinkedDecryptedBlock decrypted_block(new DecryptedBlockImpl());
-
- if (cdm_) {
- cdm::InputBuffer input_buffer;
- std::vector<cdm::SubsampleEntry> subsamples;
- ConfigureInputBuffer(encrypted_buffer, encrypted_block_info, &subsamples,
- &input_buffer);
- status = cdm_->Decrypt(input_buffer, decrypted_block.get());
- PP_DCHECK(status != cdm::kSuccess ||
- (decrypted_block->DecryptedBuffer() &&
- decrypted_block->DecryptedBuffer()->Size()));
- }
-
- CallOnMain(callback_factory_.NewCallback(
- &CdmWrapper::DeliverBlock,
- status,
- decrypted_block,
- encrypted_block_info.tracking_info));
-}
-
-void CdmWrapper::InitializeAudioDecoder(
- const PP_AudioDecoderConfig& decoder_config,
- pp::Buffer_Dev extra_data_buffer) {
- PP_DCHECK(cdm_); // Initialize() should have succeeded.
-
- cdm::Status status = cdm::kSessionError;
- if (cdm_) {
- cdm::AudioDecoderConfig cdm_decoder_config;
- cdm_decoder_config.codec =
- PpAudioCodecToCdmAudioCodec(decoder_config.codec);
- cdm_decoder_config.channel_count = decoder_config.channel_count;
- cdm_decoder_config.bits_per_channel = decoder_config.bits_per_channel;
- cdm_decoder_config.samples_per_second = decoder_config.samples_per_second;
- cdm_decoder_config.extra_data =
- static_cast<uint8_t*>(extra_data_buffer.data());
- cdm_decoder_config.extra_data_size =
- static_cast<int32_t>(extra_data_buffer.size());
- status = cdm_->InitializeAudioDecoder(cdm_decoder_config);
- }
-
- CallOnMain(callback_factory_.NewCallback(
- &CdmWrapper::DecoderInitializeDone,
- PP_DECRYPTORSTREAMTYPE_AUDIO,
- decoder_config.request_id,
- status == cdm::kSuccess));
-}
-
-void CdmWrapper::InitializeVideoDecoder(
- const PP_VideoDecoderConfig& decoder_config,
- pp::Buffer_Dev extra_data_buffer) {
- PP_DCHECK(cdm_); // Initialize() should have succeeded.
-
- cdm::Status status = cdm::kSessionError;
- if (cdm_) {
- cdm::VideoDecoderConfig cdm_decoder_config;
- cdm_decoder_config.codec =
- PpVideoCodecToCdmVideoCodec(decoder_config.codec);
- cdm_decoder_config.profile =
- PpVCProfileToCdmVCProfile(decoder_config.profile);
- cdm_decoder_config.format =
- PpDecryptedFrameFormatToCdmVideoFormat(decoder_config.format);
- cdm_decoder_config.coded_size.width = decoder_config.width;
- cdm_decoder_config.coded_size.height = decoder_config.height;
- cdm_decoder_config.extra_data =
- static_cast<uint8_t*>(extra_data_buffer.data());
- cdm_decoder_config.extra_data_size =
- static_cast<int32_t>(extra_data_buffer.size());
- status = cdm_->InitializeVideoDecoder(cdm_decoder_config);
- }
-
- CallOnMain(callback_factory_.NewCallback(
- &CdmWrapper::DecoderInitializeDone,
- PP_DECRYPTORSTREAMTYPE_VIDEO,
- decoder_config.request_id,
- status == cdm::kSuccess));
-}
-
-void CdmWrapper::DeinitializeDecoder(PP_DecryptorStreamType decoder_type,
- uint32_t request_id) {
- PP_DCHECK(cdm_); // Initialize() should have succeeded.
- if (cdm_) {
- cdm_->DeinitializeDecoder(
- PpDecryptorStreamTypeToCdmStreamType(decoder_type));
- }
-
- CallOnMain(callback_factory_.NewCallback(
- &CdmWrapper::DecoderDeinitializeDone,
- decoder_type,
- request_id));
-}
-
-void CdmWrapper::ResetDecoder(PP_DecryptorStreamType decoder_type,
- uint32_t request_id) {
- PP_DCHECK(cdm_); // Initialize() should have succeeded.
- if (cdm_)
- cdm_->ResetDecoder(PpDecryptorStreamTypeToCdmStreamType(decoder_type));
-
- CallOnMain(callback_factory_.NewCallback(&CdmWrapper::DecoderResetDone,
- decoder_type,
- request_id));
-}
-
-void CdmWrapper::DecryptAndDecode(
- PP_DecryptorStreamType decoder_type,
- pp::Buffer_Dev encrypted_buffer,
- const PP_EncryptedBlockInfo& encrypted_block_info) {
- PP_DCHECK(cdm_); // Initialize() should have succeeded.
-
- // Release a buffer that the caller indicated it is finished with.
- allocator_.Release(encrypted_block_info.tracking_info.buffer_id);
-
- cdm::InputBuffer input_buffer;
- std::vector<cdm::SubsampleEntry> subsamples;
- if (cdm_ && !encrypted_buffer.is_null()) {
- ConfigureInputBuffer(encrypted_buffer,
- encrypted_block_info,
- &subsamples,
- &input_buffer);
- }
-
- cdm::Status status = cdm::kDecodeError;
-
- switch (decoder_type) {
- case PP_DECRYPTORSTREAMTYPE_VIDEO: {
- LinkedVideoFrame video_frame(new VideoFrameImpl());
- if (cdm_)
- status = cdm_->DecryptAndDecodeFrame(input_buffer, video_frame.get());
- CallOnMain(callback_factory_.NewCallback(
- &CdmWrapper::DeliverFrame,
- status,
- video_frame,
- encrypted_block_info.tracking_info));
- return;
- }
-
- case PP_DECRYPTORSTREAMTYPE_AUDIO: {
- LinkedAudioFrames audio_frames(new AudioFramesImpl());
- if (cdm_) {
- status = cdm_->DecryptAndDecodeSamples(input_buffer,
- audio_frames.get());
- }
- CallOnMain(callback_factory_.NewCallback(
- &CdmWrapper::DeliverSamples,
- status,
- audio_frames,
- encrypted_block_info.tracking_info));
- return;
- }
-
- default:
- PP_NOTREACHED();
- return;
- }
-}
-
-cdm::Buffer* CdmWrapper::Allocate(int32_t capacity) {
- return allocator_.Allocate(capacity);
-}
-
-void CdmWrapper::SetTimer(int64_t delay_ms, void* context) {
- // NOTE: doesn't really need to run on the main thread; could just as well run
- // on a helper thread if |cdm_| were thread-friendly and care was taken. We
- // only use CallOnMainThread() here to get delayed-execution behavior.
- pp::Module::Get()->core()->CallOnMainThread(
- delay_ms,
- callback_factory_.NewCallback(&CdmWrapper::TimerExpired, context),
- PP_OK);
-}
-
-void CdmWrapper::TimerExpired(int32_t result, void* context) {
- PP_DCHECK(result == PP_OK);
- cdm_->TimerExpired(context);
-}
-
-double CdmWrapper::GetCurrentWallTimeInSeconds() {
- return pp::Module::Get()->core()->GetTime();
-}
-
-void CdmWrapper::SendKeyMessage(
- const char* session_id, int32_t session_id_length,
- const char* message, int32_t message_length,
- const char* default_url, int32_t default_url_length) {
- PP_DCHECK(!key_system_.empty());
- PostOnMain(callback_factory_.NewCallback(
- &CdmWrapper::KeyMessage,
- SessionInfo(key_system_,
- std::string(session_id, session_id_length)),
- std::vector<uint8>(message, message + message_length),
- std::string(default_url, default_url_length)));
-}
-
-void CdmWrapper::SendKeyError(const char* session_id,
- int32_t session_id_length,
- cdm::MediaKeyError error_code,
- uint32_t system_code) {
- SendKeyErrorInternal(key_system_,
- std::string(session_id, session_id_length),
- error_code,
- system_code);
-}
-
-void CdmWrapper::GetPrivateData(int32_t* instance,
- cdm::Host::GetPrivateInterface* get_interface) {
- *instance = pp_instance();
- *get_interface = pp::Module::Get()->get_browser_interface();
-}
-
-void CdmWrapper::SendUnknownKeyError(const std::string& key_system,
- const std::string& session_id) {
- SendKeyErrorInternal(key_system, session_id, cdm::kUnknownError, 0);
-}
-
-void CdmWrapper::SendKeyAdded(const std::string& key_system,
- const std::string& session_id) {
- PostOnMain(callback_factory_.NewCallback(
- &CdmWrapper::KeyAdded,
- SessionInfo(key_system_, session_id)));
-}
-
-void CdmWrapper::SendKeyErrorInternal(const std::string& key_system,
- const std::string& session_id,
- cdm::MediaKeyError error_code,
- uint32_t system_code) {
- PP_DCHECK(!key_system.empty());
- PostOnMain(callback_factory_.NewCallback(&CdmWrapper::KeyError,
- SessionInfo(key_system_, session_id),
- error_code,
- system_code));
-}
-
-void CdmWrapper::KeyAdded(int32_t result, const SessionInfo& session_info) {
- PP_DCHECK(result == PP_OK);
- PP_DCHECK(!session_info.key_system.empty());
- pp::ContentDecryptor_Private::KeyAdded(session_info.key_system,
- session_info.session_id);
-}
-
-void CdmWrapper::KeyMessage(int32_t result,
- const SessionInfo& session_info,
- const std::vector<uint8>& message,
- const std::string& default_url) {
- PP_DCHECK(result == PP_OK);
- PP_DCHECK(!session_info.key_system.empty());
-
- pp::VarArrayBuffer message_array_buffer(message.size());
- if (message.size() > 0) {
- memcpy(message_array_buffer.Map(), message.data(), message.size());
- }
-
- pp::ContentDecryptor_Private::KeyMessage(
- session_info.key_system, session_info.session_id,
- message_array_buffer, default_url);
-}
-
-void CdmWrapper::KeyError(int32_t result,
- const SessionInfo& session_info,
- cdm::MediaKeyError error_code,
- uint32_t system_code) {
- PP_DCHECK(result == PP_OK);
- PP_DCHECK(!session_info.key_system.empty());
- pp::ContentDecryptor_Private::KeyError(
- session_info.key_system, session_info.session_id,
- error_code, system_code);
-}
-
-void CdmWrapper::DeliverBlock(int32_t result,
- const cdm::Status& status,
- const LinkedDecryptedBlock& decrypted_block,
- const PP_DecryptTrackingInfo& tracking_info) {
- PP_DCHECK(result == PP_OK);
- PP_DecryptedBlockInfo decrypted_block_info;
- decrypted_block_info.tracking_info = tracking_info;
- decrypted_block_info.tracking_info.timestamp = decrypted_block->Timestamp();
- decrypted_block_info.tracking_info.buffer_id = 0;
- decrypted_block_info.data_size = 0;
- decrypted_block_info.result = CdmStatusToPpDecryptResult(status);
-
- pp::Buffer_Dev buffer;
-
- if (decrypted_block_info.result == PP_DECRYPTRESULT_SUCCESS) {
- PP_DCHECK(decrypted_block.get() && decrypted_block->DecryptedBuffer());
- if (!decrypted_block.get() || !decrypted_block->DecryptedBuffer()) {
- PP_NOTREACHED();
- decrypted_block_info.result = PP_DECRYPTRESULT_DECRYPT_ERROR;
- } else {
- PpbBuffer* ppb_buffer =
- static_cast<PpbBuffer*>(decrypted_block->DecryptedBuffer());
- buffer = ppb_buffer->buffer_dev();
- decrypted_block_info.tracking_info.buffer_id = ppb_buffer->buffer_id();
- decrypted_block_info.data_size = ppb_buffer->Size();
- }
- }
-
- pp::ContentDecryptor_Private::DeliverBlock(buffer, decrypted_block_info);
-}
-
-void CdmWrapper::DecoderInitializeDone(int32_t result,
- PP_DecryptorStreamType decoder_type,
- uint32_t request_id,
- bool success) {
- PP_DCHECK(result == PP_OK);
- pp::ContentDecryptor_Private::DecoderInitializeDone(decoder_type,
- request_id,
- success);
-}
-
-void CdmWrapper::DecoderDeinitializeDone(int32_t result,
- PP_DecryptorStreamType decoder_type,
- uint32_t request_id) {
- pp::ContentDecryptor_Private::DecoderDeinitializeDone(decoder_type,
- request_id);
-}
-
-void CdmWrapper::DecoderResetDone(int32_t result,
- PP_DecryptorStreamType decoder_type,
- uint32_t request_id) {
- pp::ContentDecryptor_Private::DecoderResetDone(decoder_type, request_id);
-}
-
-void CdmWrapper::DeliverFrame(
- int32_t result,
- const cdm::Status& status,
- const LinkedVideoFrame& video_frame,
- const PP_DecryptTrackingInfo& tracking_info) {
- PP_DCHECK(result == PP_OK);
- PP_DecryptedFrameInfo decrypted_frame_info;
- decrypted_frame_info.tracking_info.request_id = tracking_info.request_id;
- decrypted_frame_info.tracking_info.buffer_id = 0;
- decrypted_frame_info.result = CdmStatusToPpDecryptResult(status);
-
- pp::Buffer_Dev buffer;
-
- if (decrypted_frame_info.result == PP_DECRYPTRESULT_SUCCESS) {
- if (!IsValidVideoFrame(video_frame)) {
- PP_NOTREACHED();
- decrypted_frame_info.result = PP_DECRYPTRESULT_DECODE_ERROR;
- } else {
- PpbBuffer* ppb_buffer =
- static_cast<PpbBuffer*>(video_frame->FrameBuffer());
-
- buffer = ppb_buffer->buffer_dev();
-
- decrypted_frame_info.tracking_info.timestamp = video_frame->Timestamp();
- decrypted_frame_info.tracking_info.buffer_id = ppb_buffer->buffer_id();
- decrypted_frame_info.format =
- CdmVideoFormatToPpDecryptedFrameFormat(video_frame->Format());
- decrypted_frame_info.width = video_frame->Size().width;
- decrypted_frame_info.height = video_frame->Size().height;
- decrypted_frame_info.plane_offsets[PP_DECRYPTEDFRAMEPLANES_Y] =
- video_frame->PlaneOffset(cdm::VideoFrame::kYPlane);
- decrypted_frame_info.plane_offsets[PP_DECRYPTEDFRAMEPLANES_U] =
- video_frame->PlaneOffset(cdm::VideoFrame::kUPlane);
- decrypted_frame_info.plane_offsets[PP_DECRYPTEDFRAMEPLANES_V] =
- video_frame->PlaneOffset(cdm::VideoFrame::kVPlane);
- decrypted_frame_info.strides[PP_DECRYPTEDFRAMEPLANES_Y] =
- video_frame->Stride(cdm::VideoFrame::kYPlane);
- decrypted_frame_info.strides[PP_DECRYPTEDFRAMEPLANES_U] =
- video_frame->Stride(cdm::VideoFrame::kUPlane);
- decrypted_frame_info.strides[PP_DECRYPTEDFRAMEPLANES_V] =
- video_frame->Stride(cdm::VideoFrame::kVPlane);
- }
- }
- pp::ContentDecryptor_Private::DeliverFrame(buffer, decrypted_frame_info);
-}
-
-void CdmWrapper::DeliverSamples(int32_t result,
- const cdm::Status& status,
- const LinkedAudioFrames& audio_frames,
- const PP_DecryptTrackingInfo& tracking_info) {
- PP_DCHECK(result == PP_OK);
-
- PP_DecryptedBlockInfo decrypted_block_info;
- decrypted_block_info.tracking_info = tracking_info;
- decrypted_block_info.tracking_info.timestamp = 0;
- decrypted_block_info.tracking_info.buffer_id = 0;
- decrypted_block_info.data_size = 0;
- decrypted_block_info.result = CdmStatusToPpDecryptResult(status);
-
- pp::Buffer_Dev buffer;
-
- if (decrypted_block_info.result == PP_DECRYPTRESULT_SUCCESS) {
- PP_DCHECK(audio_frames.get() && audio_frames->FrameBuffer());
- if (!audio_frames.get() || !audio_frames->FrameBuffer()) {
- PP_NOTREACHED();
- decrypted_block_info.result = PP_DECRYPTRESULT_DECRYPT_ERROR;
- } else {
- PpbBuffer* ppb_buffer =
- static_cast<PpbBuffer*>(audio_frames->FrameBuffer());
- buffer = ppb_buffer->buffer_dev();
- decrypted_block_info.tracking_info.buffer_id = ppb_buffer->buffer_id();
- decrypted_block_info.data_size = ppb_buffer->Size();
- }
- }
-
- pp::ContentDecryptor_Private::DeliverSamples(buffer, decrypted_block_info);
-}
-
-bool CdmWrapper::IsValidVideoFrame(const LinkedVideoFrame& video_frame) {
- if (!video_frame.get() ||
- !video_frame->FrameBuffer() ||
- (video_frame->Format() != cdm::kI420 &&
- video_frame->Format() != cdm::kYv12)) {
- return false;
- }
-
- PpbBuffer* ppb_buffer = static_cast<PpbBuffer*>(video_frame->FrameBuffer());
-
- for (int i = 0; i < cdm::VideoFrame::kMaxPlanes; ++i) {
- int plane_height = (i == cdm::VideoFrame::kYPlane) ?
- video_frame->Size().height : (video_frame->Size().height + 1) / 2;
- cdm::VideoFrame::VideoPlane plane =
- static_cast<cdm::VideoFrame::VideoPlane>(i);
- if (ppb_buffer->Size() < video_frame->PlaneOffset(plane) +
- plane_height * video_frame->Stride(plane)) {
- return false;
- }
- }
-
- return true;
-}
-
-void* GetCdmHost(int host_interface_version, void* user_data) {
- if (!host_interface_version || !user_data)
- return NULL;
-
- if (host_interface_version != cdm::kHostInterfaceVersion)
- return NULL;
-
- CdmWrapper* cdm_wrapper = static_cast<CdmWrapper*>(user_data);
- return static_cast<cdm::Host*>(cdm_wrapper);
-}
-
-// This object is the global object representing this plugin library as long
-// as it is loaded.
-class CdmWrapperModule : public pp::Module {
- public:
- CdmWrapperModule() : pp::Module() {
- // This function blocks the renderer thread (PluginInstance::Initialize()).
- // Move this call to other places if this may be a concern in the future.
- INITIALIZE_CDM_MODULE();
- }
- virtual ~CdmWrapperModule() {
- DeinitializeCdmModule();
- }
-
- virtual pp::Instance* CreateInstance(PP_Instance instance) {
- return new CdmWrapper(instance, this);
- }
-};
-
-} // namespace media
-
-namespace pp {
-
-// Factory function for your specialization of the Module object.
-Module* CreateModule() {
- return new media::CdmWrapperModule();
-}
-
-} // namespace pp
diff --git a/chromium/media/cdm/ppapi/cdm_wrapper.h b/chromium/media/cdm/ppapi/cdm_wrapper.h
new file mode 100644
index 00000000000..d827336a8c6
--- /dev/null
+++ b/chromium/media/cdm/ppapi/cdm_wrapper.h
@@ -0,0 +1,490 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CDM_PPAPI_CDM_WRAPPER_H_
+#define MEDIA_CDM_PPAPI_CDM_WRAPPER_H_
+
+#include <map>
+#include <queue>
+#include <string>
+
+#include "base/basictypes.h"
+#include "media/cdm/ppapi/api/content_decryption_module.h"
+#include "media/cdm/ppapi/cdm_helpers.h"
+#include "media/cdm/ppapi/supported_cdm_versions.h"
+#include "ppapi/cpp/logging.h"
+
+namespace media {
+
+// CdmWrapper wraps different versions of ContentDecryptionModule interfaces and
+// exposes a common interface to the caller.
+//
+// The caller should call CdmWrapper::Create() to create a CDM instance.
+// CdmWrapper will first try to create a CDM instance that supports the latest
+// CDM interface (ContentDecryptionModule). If such an instance cannot be
+// created (e.g. an older CDM was loaded), CdmWrapper will try to create a CDM
+// that supports an older version of CDM interface (e.g.
+// ContentDecryptionModule_*). Internally CdmWrapper converts the CdmWrapper
+// calls to corresponding ContentDecryptionModule calls.
+//
+// Note that CdmWrapper interface always reflects the latest state of content
+// decryption related PPAPI APIs (e.g. pp::ContentDecryptor_Private).
+//
+// Since this file is highly templated and default implementations are short
+// (just a shim layer in most cases), everything is done in this header file.
+class CdmWrapper {
+ public:
+ // CDM_1 and CDM_2 methods AddKey() and CancelKeyRequest() may require
+ // callbacks to fire. Use this enum to indicate the additional calls required.
+ // TODO(jrummell): Remove return value once CDM_1 and CDM_2 are no longer
+ // supported.
+ enum Result {
+ NO_ACTION,
+ CALL_KEY_ADDED,
+ CALL_KEY_ERROR
+ };
+
+ static CdmWrapper* Create(const char* key_system,
+ uint32_t key_system_size,
+ GetCdmHostFunc get_cdm_host_func,
+ void* user_data);
+
+ virtual ~CdmWrapper() {};
+
+ virtual void CreateSession(uint32_t session_id,
+ const char* type,
+ uint32_t type_size,
+ const uint8_t* init_data,
+ uint32_t init_data_size) = 0;
+ virtual Result UpdateSession(uint32_t session_id,
+ const uint8_t* response,
+ uint32_t response_size) = 0;
+ virtual Result ReleaseSession(uint32_t session_id) = 0;
+ virtual void TimerExpired(void* context) = 0;
+ virtual cdm::Status Decrypt(const cdm::InputBuffer& encrypted_buffer,
+ cdm::DecryptedBlock* decrypted_buffer) = 0;
+ virtual cdm::Status InitializeAudioDecoder(
+ const cdm::AudioDecoderConfig& audio_decoder_config) = 0;
+ virtual cdm::Status InitializeVideoDecoder(
+ const cdm::VideoDecoderConfig& video_decoder_config) = 0;
+ virtual void DeinitializeDecoder(cdm::StreamType decoder_type) = 0;
+ virtual void ResetDecoder(cdm::StreamType decoder_type) = 0;
+ virtual cdm::Status DecryptAndDecodeFrame(
+ const cdm::InputBuffer& encrypted_buffer,
+ cdm::VideoFrame* video_frame) = 0;
+ virtual cdm::Status DecryptAndDecodeSamples(
+ const cdm::InputBuffer& encrypted_buffer,
+ cdm::AudioFrames* audio_frames) = 0;
+ virtual void OnPlatformChallengeResponse(
+ const cdm::PlatformChallengeResponse& response) = 0;
+ virtual void OnQueryOutputProtectionStatus(
+ uint32_t link_mask,
+ uint32_t output_protection_mask) = 0;
+
+ // ContentDecryptionModule_1 and ContentDecryptionModule_2 interface methods
+ // AddKey() and CancelKeyRequest() (older versions of UpdateSession() and
+ // ReleaseSession(), respectively) pass in the web_session_id rather than the
+ // session_id. As well, Host_1 and Host_2 callbacks SendKeyMessage() and
+ // SendKeyError() include the web_session_id, but the actual callbacks need
+ // session_id.
+ //
+ // The following functions maintain the session_id <-> web_session_id mapping.
+ // These can be removed once _1 and _2 interfaces are no longer supported.
+
+ // Determine the corresponding session_id for |web_session_id|.
+ virtual uint32_t LookupSessionId(const std::string& web_session_id) = 0;
+
+ // Determine the corresponding session_id for |session_id|.
+ virtual const std::string LookupWebSessionId(uint32_t session_id) = 0;
+
+ // Map between session_id and web_session_id.
+ // TODO(jrummell): The following can be removed once CDM_1 and CDM_2 are
+ // no longer supported.
+ typedef std::map<uint32_t, std::string> SessionMap;
+ SessionMap session_map_;
+
+ static const uint32_t kInvalidSessionId = 0;
+
+ // As the response from PrefixedGenerateKeyRequest() may be synchronous or
+ // asynchronous, keep track of the current request during the call to handle
+ // synchronous responses or errors. If no response received, add this request
+ // to a queue and assume that the subsequent responses come back in the order
+ // issued.
+ // TODO(jrummell): Remove once all supported CDM host interfaces support
+ // session_id.
+ uint32_t current_key_request_session_id_;
+ std::queue<uint32_t> pending_key_request_session_ids_;
+
+ protected:
+ CdmWrapper() : current_key_request_session_id_(kInvalidSessionId) {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(CdmWrapper);
+};
+
+// Template class that does the CdmWrapper -> CdmInterface conversion. Default
+// implementations are provided. Any methods that need special treatment should
+// be specialized.
+template <class CdmInterface>
+class CdmWrapperImpl : public CdmWrapper {
+ public:
+ static CdmWrapper* Create(const char* key_system,
+ uint32_t key_system_size,
+ GetCdmHostFunc get_cdm_host_func,
+ void* user_data) {
+ void* cdm_instance = ::CreateCdmInstance(
+ CdmInterface::kVersion, key_system, key_system_size, get_cdm_host_func,
+ user_data);
+ if (!cdm_instance)
+ return NULL;
+
+ return new CdmWrapperImpl<CdmInterface>(
+ static_cast<CdmInterface*>(cdm_instance));
+ }
+
+ virtual ~CdmWrapperImpl() {
+ cdm_->Destroy();
+ }
+
+ virtual void CreateSession(uint32_t session_id,
+ const char* type,
+ uint32_t type_size,
+ const uint8_t* init_data,
+ uint32_t init_data_size) OVERRIDE {
+ cdm_->CreateSession(session_id, type, type_size, init_data, init_data_size);
+ }
+
+ virtual Result UpdateSession(uint32_t session_id,
+ const uint8_t* response,
+ uint32_t response_size) OVERRIDE {
+ cdm_->UpdateSession(session_id, response, response_size);
+ return NO_ACTION;
+ }
+
+ virtual Result ReleaseSession(uint32_t session_id) OVERRIDE {
+ cdm_->ReleaseSession(session_id);
+ return NO_ACTION;
+ }
+
+ virtual void TimerExpired(void* context) OVERRIDE {
+ cdm_->TimerExpired(context);
+ }
+
+ virtual cdm::Status Decrypt(const cdm::InputBuffer& encrypted_buffer,
+ cdm::DecryptedBlock* decrypted_buffer) OVERRIDE {
+ return cdm_->Decrypt(encrypted_buffer, decrypted_buffer);
+ }
+
+ virtual cdm::Status InitializeAudioDecoder(
+ const cdm::AudioDecoderConfig& audio_decoder_config) OVERRIDE {
+ return cdm_->InitializeAudioDecoder(audio_decoder_config);
+ }
+
+ virtual cdm::Status InitializeVideoDecoder(
+ const cdm::VideoDecoderConfig& video_decoder_config) OVERRIDE {
+ return cdm_->InitializeVideoDecoder(video_decoder_config);
+ }
+
+ virtual void DeinitializeDecoder(cdm::StreamType decoder_type) OVERRIDE {
+ cdm_->DeinitializeDecoder(decoder_type);
+ }
+
+ virtual void ResetDecoder(cdm::StreamType decoder_type) OVERRIDE {
+ cdm_->ResetDecoder(decoder_type);
+ }
+
+ virtual cdm::Status DecryptAndDecodeFrame(
+ const cdm::InputBuffer& encrypted_buffer,
+ cdm::VideoFrame* video_frame) OVERRIDE {
+ return cdm_->DecryptAndDecodeFrame(encrypted_buffer, video_frame);
+ }
+
+ virtual cdm::Status DecryptAndDecodeSamples(
+ const cdm::InputBuffer& encrypted_buffer,
+ cdm::AudioFrames* audio_frames) OVERRIDE {
+ return cdm_->DecryptAndDecodeSamples(encrypted_buffer, audio_frames);
+ }
+
+ virtual void OnPlatformChallengeResponse(
+ const cdm::PlatformChallengeResponse& response) OVERRIDE {
+ cdm_->OnPlatformChallengeResponse(response);
+ }
+
+ virtual void OnQueryOutputProtectionStatus(
+ uint32_t link_mask,
+ uint32_t output_protection_mask) OVERRIDE {
+ cdm_->OnQueryOutputProtectionStatus(link_mask, output_protection_mask);
+ }
+
+ uint32_t LookupSessionId(const std::string& web_session_id) {
+ for (SessionMap::iterator it = session_map_.begin();
+ it != session_map_.end();
+ ++it) {
+ if (it->second == web_session_id)
+ return it->first;
+ }
+
+ // There is no entry in the map; assume it came from the current
+ // PrefixedGenerateKeyRequest() call (if possible). If no current request,
+ // assume it came from the oldest PrefixedGenerateKeyRequest() call.
+ uint32_t session_id = current_key_request_session_id_;
+ if (current_key_request_session_id_) {
+ // Only 1 response is allowed for the current
+ // PrefixedGenerateKeyRequest().
+ current_key_request_session_id_ = kInvalidSessionId;
+ } else {
+ PP_DCHECK(!pending_key_request_session_ids_.empty());
+ session_id = pending_key_request_session_ids_.front();
+ pending_key_request_session_ids_.pop();
+ }
+
+ // If this is a valid |session_id|, add it to the list. Otherwise, avoid
+ // adding empty string as a mapping to prevent future calls with an empty
+ // string from using the wrong session_id.
+ if (!web_session_id.empty()) {
+ PP_DCHECK(session_map_.find(session_id) == session_map_.end());
+ session_map_[session_id] = web_session_id;
+ }
+
+ return session_id;
+ }
+
+ const std::string LookupWebSessionId(uint32_t session_id) {
+ // Session may not exist if error happens during CreateSession().
+ SessionMap::iterator it = session_map_.find(session_id);
+ return (it != session_map_.end()) ? it->second : std::string();
+ }
+
+ private:
+ CdmWrapperImpl(CdmInterface* cdm) : cdm_(cdm) {
+ PP_DCHECK(cdm_);
+ }
+
+ CdmInterface* cdm_;
+
+ DISALLOW_COPY_AND_ASSIGN(CdmWrapperImpl);
+};
+
+// For ContentDecryptionModule_1 and ContentDecryptionModule_2,
+// CreateSession(), UpdateSession(), and ReleaseSession() call methods
+// are incompatible with ContentDecryptionModule_3. Use the following
+// templated functions to handle this.
+
+template <class CdmInterface>
+void PrefixedGenerateKeyRequest(CdmWrapper* wrapper,
+ CdmInterface* cdm,
+ uint32_t session_id,
+ const char* type,
+ uint32_t type_size,
+ const uint8_t* init_data,
+ uint32_t init_data_size) {
+ // As it is possible for CDMs to reply synchronously during the call to
+ // GenerateKeyRequest(), keep track of |session_id|.
+ wrapper->current_key_request_session_id_ = session_id;
+
+ cdm::Status status =
+ cdm->GenerateKeyRequest(type, type_size, init_data, init_data_size);
+ PP_DCHECK(status == cdm::kSuccess || status == cdm::kSessionError);
+ if (status != cdm::kSuccess) {
+ // If GenerateKeyRequest() failed, no subsequent asynchronous replies
+ // will be sent. Verify that a response was sent synchronously.
+ PP_DCHECK(wrapper->current_key_request_session_id_ ==
+ CdmWrapper::kInvalidSessionId);
+ wrapper->current_key_request_session_id_ = CdmWrapper::kInvalidSessionId;
+ return;
+ }
+
+ if (wrapper->current_key_request_session_id_) {
+ // If this request is still pending (SendKeyMessage() or SendKeyError()
+ // not called synchronously), add |session_id| to the end of the queue.
+ // Without CDM support, it is impossible to match SendKeyMessage()
+ // (or SendKeyError()) responses to the |session_id|. Doing the best
+ // we can by keeping track of this in a queue, and assuming the responses
+ // come back in order.
+ wrapper->pending_key_request_session_ids_.push(session_id);
+ wrapper->current_key_request_session_id_ = CdmWrapper::kInvalidSessionId;
+ }
+}
+
+template <class CdmInterface>
+CdmWrapper::Result PrefixedAddKey(CdmWrapper* wrapper,
+ CdmInterface* cdm,
+ uint32_t session_id,
+ const uint8_t* response,
+ uint32_t response_size) {
+ const std::string web_session_id = wrapper->LookupWebSessionId(session_id);
+ if (web_session_id.empty()) {
+ // Possible if UpdateSession() called before CreateSession().
+ return CdmWrapper::CALL_KEY_ERROR;
+ }
+
+ // CDM_1 and CDM_2 accept initdata, which is no longer needed.
+ // In it's place pass in NULL.
+ cdm::Status status = cdm->AddKey(web_session_id.data(), web_session_id.size(),
+ response, response_size,
+ NULL, 0);
+ PP_DCHECK(status == cdm::kSuccess || status == cdm::kSessionError);
+ if (status != cdm::kSuccess) {
+ // Some CDMs using Host_1/2 don't call keyerror, so send one.
+ return CdmWrapper::CALL_KEY_ERROR;
+ }
+
+ return CdmWrapper::CALL_KEY_ADDED;
+}
+
+template <class CdmInterface>
+CdmWrapper::Result PrefixedCancelKeyRequest(CdmWrapper* wrapper,
+ CdmInterface* cdm,
+ uint32_t session_id) {
+ const std::string web_session_id = wrapper->LookupWebSessionId(session_id);
+ if (web_session_id.empty()) {
+ // Possible if ReleaseSession() called before CreateSession().
+ return CdmWrapper::CALL_KEY_ERROR;
+ }
+
+ wrapper->session_map_.erase(session_id);
+ cdm::Status status =
+ cdm->CancelKeyRequest(web_session_id.data(), web_session_id.size());
+
+ PP_DCHECK(status == cdm::kSuccess || status == cdm::kSessionError);
+ if (status != cdm::kSuccess) {
+ // Some CDMs using Host_1/2 don't call keyerror, so send one.
+ return CdmWrapper::CALL_KEY_ERROR;
+ }
+
+ return CdmWrapper::NO_ACTION;
+}
+
+// Specializations for ContentDecryptionModule_1.
+
+template <>
+void CdmWrapperImpl<cdm::ContentDecryptionModule_1>::CreateSession(
+ uint32_t session_id,
+ const char* type,
+ uint32_t type_size,
+ const uint8_t* init_data,
+ uint32_t init_data_size) {
+ PrefixedGenerateKeyRequest(
+ this, cdm_, session_id, type, type_size, init_data, init_data_size);
+}
+
+template <>
+CdmWrapper::Result CdmWrapperImpl<
+ cdm::ContentDecryptionModule_1>::UpdateSession(uint32_t session_id,
+ const uint8_t* response,
+ uint32_t response_size) {
+ return PrefixedAddKey(this, cdm_, session_id, response, response_size);
+}
+
+template <>
+CdmWrapper::Result CdmWrapperImpl<
+ cdm::ContentDecryptionModule_1>::ReleaseSession(uint32_t session_id) {
+ return PrefixedCancelKeyRequest(this, cdm_, session_id);
+}
+
+template <> void CdmWrapperImpl<cdm::ContentDecryptionModule_1>::
+ OnPlatformChallengeResponse(
+ const cdm::PlatformChallengeResponse& response) {
+ PP_NOTREACHED();
+}
+
+template <> void CdmWrapperImpl<cdm::ContentDecryptionModule_1>::
+ OnQueryOutputProtectionStatus(uint32_t link_mask,
+ uint32_t output_protection_mask) {
+ PP_NOTREACHED();
+}
+
+template <> cdm::Status CdmWrapperImpl<cdm::ContentDecryptionModule_1>::
+ DecryptAndDecodeSamples(const cdm::InputBuffer& encrypted_buffer,
+ cdm::AudioFrames* audio_frames) {
+ AudioFramesImpl audio_frames_1;
+ cdm::Status status =
+ cdm_->DecryptAndDecodeSamples(encrypted_buffer, &audio_frames_1);
+ if (status != cdm::kSuccess)
+ return status;
+
+ audio_frames->SetFrameBuffer(audio_frames_1.PassFrameBuffer());
+ audio_frames->SetFormat(cdm::kAudioFormatS16);
+ return cdm::kSuccess;
+}
+
+// Specializations for ContentDecryptionModule_2.
+
+template <>
+void CdmWrapperImpl<cdm::ContentDecryptionModule_2>::CreateSession(
+ uint32_t session_id,
+ const char* type,
+ uint32_t type_size,
+ const uint8_t* init_data,
+ uint32_t init_data_size) {
+ PrefixedGenerateKeyRequest(
+ this, cdm_, session_id, type, type_size, init_data, init_data_size);
+}
+
+template <>
+CdmWrapper::Result CdmWrapperImpl<
+ cdm::ContentDecryptionModule_2>::UpdateSession(uint32_t session_id,
+ const uint8_t* response,
+ uint32_t response_size) {
+ return PrefixedAddKey(this, cdm_, session_id, response, response_size);
+}
+
+template <>
+CdmWrapper::Result CdmWrapperImpl<
+ cdm::ContentDecryptionModule_2>::ReleaseSession(uint32_t session_id) {
+ return PrefixedCancelKeyRequest(this, cdm_, session_id);
+}
+
+CdmWrapper* CdmWrapper::Create(const char* key_system,
+ uint32_t key_system_size,
+ GetCdmHostFunc get_cdm_host_func,
+ void* user_data) {
+ COMPILE_ASSERT(cdm::ContentDecryptionModule::kVersion ==
+ cdm::ContentDecryptionModule_3::kVersion,
+ update_code_below);
+
+ // Ensure IsSupportedCdmInterfaceVersion matches this implementation.
+ // Always update this DCHECK when updating this function.
+ // If this check fails, update this function and DCHECK or update
+ // IsSupportedCdmInterfaceVersion.
+ PP_DCHECK(
+ !IsSupportedCdmInterfaceVersion(
+ cdm::ContentDecryptionModule::kVersion + 1) &&
+ IsSupportedCdmInterfaceVersion(cdm::ContentDecryptionModule::kVersion) &&
+ IsSupportedCdmInterfaceVersion(
+ cdm::ContentDecryptionModule_2::kVersion) &&
+ IsSupportedCdmInterfaceVersion(
+ cdm::ContentDecryptionModule_1::kVersion) &&
+ !IsSupportedCdmInterfaceVersion(
+ cdm::ContentDecryptionModule_1::kVersion - 1));
+
+ // Try to create the CDM using the latest CDM interface version.
+ CdmWrapper* cdm_wrapper =
+ CdmWrapperImpl<cdm::ContentDecryptionModule>::Create(
+ key_system, key_system_size, get_cdm_host_func, user_data);
+ if (cdm_wrapper)
+ return cdm_wrapper;
+
+ // Try to see if the CDM supports older version(s) of the CDM interface.
+ cdm_wrapper = CdmWrapperImpl<cdm::ContentDecryptionModule_2>::Create(
+ key_system, key_system_size, get_cdm_host_func, user_data);
+ if (cdm_wrapper)
+ return cdm_wrapper;
+
+ cdm_wrapper = CdmWrapperImpl<cdm::ContentDecryptionModule_1>::Create(
+ key_system, key_system_size, get_cdm_host_func, user_data);
+ return cdm_wrapper;
+}
+
+// When updating the CdmAdapter, ensure you've updated the CdmWrapper to contain
+// stub implementations for new or modified methods that the older CDM interface
+// does not have.
+// Also update supported_cdm_versions.h.
+COMPILE_ASSERT(cdm::ContentDecryptionModule::kVersion ==
+ cdm::ContentDecryptionModule_3::kVersion,
+ ensure_cdm_wrapper_templates_have_old_version_support);
+
+} // namespace media
+
+#endif // MEDIA_CDM_PPAPI_CDM_WRAPPER_H_
diff --git a/chromium/media/cdm/ppapi/clear_key_cdm.cc b/chromium/media/cdm/ppapi/clear_key_cdm.cc
index cd6a61ff313..6d3a68ce5ac 100644
--- a/chromium/media/cdm/ppapi/clear_key_cdm.cc
+++ b/chromium/media/cdm/ppapi/clear_key_cdm.cc
@@ -19,7 +19,7 @@
#if defined(CLEAR_KEY_CDM_USE_FAKE_AUDIO_DECODER)
#include "base/basictypes.h"
-static const int64 kNoTimestamp = kint64min;
+const int64 kNoTimestamp = kint64min;
#endif // CLEAR_KEY_CDM_USE_FAKE_AUDIO_DECODER
#if defined(CLEAR_KEY_CDM_USE_FFMPEG_DECODER)
@@ -59,22 +59,24 @@ static bool InitializeFFmpegLibraries() {
static bool g_ffmpeg_lib_initialized = InitializeFFmpegLibraries();
#endif // CLEAR_KEY_CDM_USE_FFMPEG_DECODER
-static const char kClearKeyCdmVersion[] = "0.1.0.1";
-static const char kExternalClearKey[] = "org.chromium.externalclearkey";
-static const int64 kSecondsPerMinute = 60;
-static const int64 kMsPerSecond = 1000;
-static const int64 kInitialTimerDelayMs = 200;
-static const int64 kMaxTimerDelayMs = 1 * kSecondsPerMinute * kMsPerSecond;
+const char kClearKeyCdmVersion[] = "0.1.0.1";
+const char kExternalClearKeyKeySystem[] = "org.chromium.externalclearkey";
+const char kExternalClearKeyDecryptOnlyKeySystem[] =
+ "org.chromium.externalclearkey.decryptonly";
+const int64 kSecondsPerMinute = 60;
+const int64 kMsPerSecond = 1000;
+const int64 kInitialTimerDelayMs = 200;
+const int64 kMaxTimerDelayMs = 1 * kSecondsPerMinute * kMsPerSecond;
// Heart beat message header. If a key message starts with |kHeartBeatHeader|,
// it's a heart beat message. Otherwise, it's a key request.
-static const char kHeartBeatHeader[] = "HEARTBEAT";
+const char kHeartBeatHeader[] = "HEARTBEAT";
// Copies |input_buffer| into a media::DecoderBuffer. If the |input_buffer| is
// empty, an empty (end-of-stream) media::DecoderBuffer is returned.
static scoped_refptr<media::DecoderBuffer> CopyDecoderBufferFrom(
const cdm::InputBuffer& input_buffer) {
if (!input_buffer.data) {
- DCHECK_EQ(input_buffer.data_size, 0);
+ DCHECK(!input_buffer.data_size);
return media::DecoderBuffer::CreateEOSBuffer();
}
@@ -83,7 +85,7 @@ static scoped_refptr<media::DecoderBuffer> CopyDecoderBufferFrom(
media::DecoderBuffer::CopyFrom(input_buffer.data, input_buffer.data_size);
std::vector<media::SubsampleEntry> subsamples;
- for (int32_t i = 0; i < input_buffer.num_subsamples; ++i) {
+ for (uint32_t i = 0; i < input_buffer.num_subsamples; ++i) {
media::SubsampleEntry subsample;
subsample.clear_bytes = input_buffer.subsamples[i].clear_bytes;
subsample.cypher_bytes = input_buffer.subsamples[i].cipher_bytes;
@@ -125,22 +127,29 @@ void INITIALIZE_CDM_MODULE() {
void DeinitializeCdmModule() {
}
-void* CreateCdmInstance(
- int cdm_interface_version,
- const char* key_system, int key_system_size,
- GetCdmHostFunc get_cdm_host_func, void* user_data) {
+void* CreateCdmInstance(int cdm_interface_version,
+ const char* key_system, uint32_t key_system_size,
+ GetCdmHostFunc get_cdm_host_func,
+ void* user_data) {
DVLOG(1) << "CreateCdmInstance()";
- if (cdm_interface_version != cdm::kCdmInterfaceVersion)
+ std::string key_system_string(key_system, key_system_size);
+ if (key_system_string != kExternalClearKeyKeySystem &&
+ key_system_string != kExternalClearKeyDecryptOnlyKeySystem) {
+ DVLOG(1) << "Unsupported key system:" << key_system_string;
+ return NULL;
+ }
+
+ if (cdm_interface_version != media::ClearKeyCdmInterface::kVersion)
return NULL;
- cdm::Host* host = static_cast<cdm::Host*>(
- get_cdm_host_func(cdm::kHostInterfaceVersion, user_data));
+ media::ClearKeyCdmHost* host = static_cast<media::ClearKeyCdmHost*>(
+ get_cdm_host_func(media::ClearKeyCdmHost::kVersion, user_data));
if (!host)
return NULL;
- return static_cast<cdm::ContentDecryptionModule*>(
- new media::ClearKeyCdm(host));
+ return new media::ClearKeyCdm(
+ host, key_system_string == kExternalClearKeyDecryptOnlyKeySystem);
}
const char* GetCdmVersion() {
@@ -149,43 +158,64 @@ const char* GetCdmVersion() {
namespace media {
-ClearKeyCdm::Client::Client() : status_(kKeyError) {}
+// Since all the calls to AesDecryptor are synchronous, pass a dummy value for
+// session_id that is never exposed outside this class.
+// TODO(jrummell): Remove usage of this when the CDM interface is updated
+// to use session_id.
+
+ClearKeyCdm::Client::Client()
+ : status_(kNone), error_code_(MediaKeys::kUnknownError), system_code_(0) {}
ClearKeyCdm::Client::~Client() {}
void ClearKeyCdm::Client::Reset() {
- status_ = kKeyError;
- session_id_.clear();
- key_message_.clear();
- default_url_.clear();
+ status_ = kNone;
+ web_session_id_.clear();
+ message_.clear();
+ destination_url_.clear();
+ error_code_ = MediaKeys::kUnknownError;
+ system_code_ = 0;
+}
+
+void ClearKeyCdm::Client::OnSessionCreated(uint32 session_id,
+ const std::string& web_session_id) {
+ status_ = static_cast<Status>(status_ | kCreated);
+ web_session_id_ = web_session_id;
}
-void ClearKeyCdm::Client::KeyAdded(const std::string& session_id) {
- status_ = kKeyAdded;
- session_id_ = session_id;
+void ClearKeyCdm::Client::OnSessionMessage(uint32 session_id,
+ const std::vector<uint8>& message,
+ const std::string& destination_url) {
+ status_ = static_cast<Status>(status_ | kMessage);
+ message_ = message;
+ destination_url_ = destination_url;
}
-void ClearKeyCdm::Client::KeyError(const std::string& session_id,
- media::MediaKeys::KeyError error_code,
- int system_code) {
- status_ = kKeyError;
- session_id_ = session_id;
+void ClearKeyCdm::Client::OnSessionReady(uint32 session_id) {
+ status_ = static_cast<Status>(status_ | kReady);
}
-void ClearKeyCdm::Client::KeyMessage(const std::string& session_id,
- const std::vector<uint8>& message,
- const std::string& default_url) {
- status_ = kKeyMessage;
- session_id_ = session_id;
- key_message_ = message;
- default_url_ = default_url;
+void ClearKeyCdm::Client::OnSessionClosed(uint32 session_id) {
+ status_ = static_cast<Status>(status_ | kClosed);
}
-ClearKeyCdm::ClearKeyCdm(cdm::Host* host)
- : decryptor_(base::Bind(&Client::KeyAdded, base::Unretained(&client_)),
- base::Bind(&Client::KeyError, base::Unretained(&client_)),
- base::Bind(&Client::KeyMessage, base::Unretained(&client_))),
+void ClearKeyCdm::Client::OnSessionError(uint32 session_id,
+ media::MediaKeys::KeyError error_code,
+ int system_code) {
+ status_ = static_cast<Status>(status_ | kError);
+ error_code_ = error_code;
+ system_code_ = system_code;
+}
+
+ClearKeyCdm::ClearKeyCdm(ClearKeyCdmHost* host, bool is_decrypt_only)
+ : decryptor_(
+ base::Bind(&Client::OnSessionCreated, base::Unretained(&client_)),
+ base::Bind(&Client::OnSessionMessage, base::Unretained(&client_)),
+ base::Bind(&Client::OnSessionReady, base::Unretained(&client_)),
+ base::Bind(&Client::OnSessionClosed, base::Unretained(&client_)),
+ base::Bind(&Client::OnSessionError, base::Unretained(&client_))),
host_(host),
+ is_decrypt_only_(is_decrypt_only),
timer_delay_ms_(kInitialTimerDelayMs),
timer_set_(false) {
#if defined(CLEAR_KEY_CDM_USE_FAKE_AUDIO_DECODER)
@@ -199,46 +229,56 @@ ClearKeyCdm::ClearKeyCdm(cdm::Host* host)
ClearKeyCdm::~ClearKeyCdm() {}
-cdm::Status ClearKeyCdm::GenerateKeyRequest(const char* type, int type_size,
+cdm::Status ClearKeyCdm::GenerateKeyRequest(const char* type,
+ uint32_t type_size,
const uint8_t* init_data,
- int init_data_size) {
+ uint32_t init_data_size) {
DVLOG(1) << "GenerateKeyRequest()";
base::AutoLock auto_lock(client_lock_);
ScopedResetter<Client> auto_resetter(&client_);
- decryptor_.GenerateKeyRequest(std::string(type, type_size),
- init_data, init_data_size);
-
- if (client_.status() != Client::kKeyMessage) {
- host_->SendKeyError(NULL, 0, cdm::kUnknownError, 0);
+ decryptor_.CreateSession(MediaKeys::kInvalidSessionId,
+ std::string(type, type_size),
+ init_data, init_data_size);
+
+ if (client_.status() != (Client::kMessage | Client::kCreated)) {
+ // Use values returned to client if possible.
+ host_->SendKeyError(client_.web_session_id().data(),
+ client_.web_session_id().size(),
+ static_cast<cdm::MediaKeyError>(client_.error_code()),
+ client_.system_code());
return cdm::kSessionError;
}
host_->SendKeyMessage(
- client_.session_id().data(), client_.session_id().size(),
- reinterpret_cast<const char*>(&client_.key_message()[0]),
- client_.key_message().size(),
- client_.default_url().data(), client_.default_url().size());
+ client_.web_session_id().data(), client_.web_session_id().size(),
+ reinterpret_cast<const char*>(&client_.message()[0]),
+ client_.message().size(),
+ client_.destination_url().data(), client_.destination_url().size());
// Only save the latest session ID for heartbeat messages.
- heartbeat_session_id_ = client_.session_id();
+ heartbeat_session_id_ = client_.web_session_id();
return cdm::kSuccess;
}
cdm::Status ClearKeyCdm::AddKey(const char* session_id,
- int session_id_size,
+ uint32_t session_id_size,
const uint8_t* key,
- int key_size,
+ uint32_t key_size,
const uint8_t* key_id,
- int key_id_size) {
+ uint32_t key_id_size) {
DVLOG(1) << "AddKey()";
+ DCHECK(!key_id && !key_id_size);
base::AutoLock auto_lock(client_lock_);
ScopedResetter<Client> auto_resetter(&client_);
- decryptor_.AddKey(key, key_size, key_id, key_id_size,
- std::string(session_id, session_id_size));
+ decryptor_.UpdateSession(MediaKeys::kInvalidSessionId, key, key_size);
- if (client_.status() != Client::kKeyAdded)
+ if (client_.status() != Client::kReady) {
+ host_->SendKeyError(session_id, session_id_size,
+ static_cast<cdm::MediaKeyError>(client_.error_code()),
+ client_.system_code());
return cdm::kSessionError;
+ }
if (!timer_set_) {
ScheduleNextHeartBeat();
@@ -249,11 +289,21 @@ cdm::Status ClearKeyCdm::AddKey(const char* session_id,
}
cdm::Status ClearKeyCdm::CancelKeyRequest(const char* session_id,
- int session_id_size) {
+ uint32_t session_id_size) {
DVLOG(1) << "CancelKeyRequest()";
base::AutoLock auto_lock(client_lock_);
ScopedResetter<Client> auto_resetter(&client_);
- decryptor_.CancelKeyRequest(std::string(session_id, session_id_size));
+ decryptor_.ReleaseSession(MediaKeys::kInvalidSessionId);
+
+ // No message normally sent by Release(), but if an error occurred,
+ // report it as a failure.
+ if (client_.status() == Client::kError) {
+ host_->SendKeyError(session_id, session_id_size,
+ static_cast<cdm::MediaKeyError>(client_.error_code()),
+ client_.system_code());
+ return cdm::kSessionError;
+ }
+
return cdm::kSuccess;
}
@@ -313,6 +363,9 @@ cdm::Status ClearKeyCdm::Decrypt(
cdm::Status ClearKeyCdm::InitializeAudioDecoder(
const cdm::AudioDecoderConfig& audio_decoder_config) {
+ if (is_decrypt_only_)
+ return cdm::kSessionError;
+
#if defined(CLEAR_KEY_CDM_USE_FFMPEG_DECODER)
if (!audio_decoder_)
audio_decoder_.reset(new media::FFmpegCdmAudioDecoder(host_));
@@ -334,6 +387,9 @@ cdm::Status ClearKeyCdm::InitializeAudioDecoder(
cdm::Status ClearKeyCdm::InitializeVideoDecoder(
const cdm::VideoDecoderConfig& video_decoder_config) {
+ if (is_decrypt_only_)
+ return cdm::kSessionError;
+
if (video_decoder_ && video_decoder_->is_initialized()) {
DCHECK(!video_decoder_->is_initialized());
return cdm::kSessionError;
@@ -391,7 +447,7 @@ cdm::Status ClearKeyCdm::DecryptAndDecodeFrame(
const cdm::InputBuffer& encrypted_buffer,
cdm::VideoFrame* decoded_frame) {
DVLOG(1) << "DecryptAndDecodeFrame()";
- TRACE_EVENT0("eme", "ClearKeyCdm::DecryptAndDecodeFrame");
+ TRACE_EVENT0("media", "ClearKeyCdm::DecryptAndDecodeFrame");
scoped_refptr<media::DecoderBuffer> buffer;
cdm::Status status = DecryptToMediaDecoderBuffer(encrypted_buffer, &buffer);
@@ -496,6 +552,16 @@ cdm::Status ClearKeyCdm::DecryptToMediaDecoderBuffer(
return cdm::kSuccess;
}
+void ClearKeyCdm::OnPlatformChallengeResponse(
+ const cdm::PlatformChallengeResponse& response) {
+ NOTIMPLEMENTED();
+}
+
+void ClearKeyCdm::OnQueryOutputProtectionStatus(
+ uint32_t link_mask, uint32_t output_protection_mask) {
+ NOTIMPLEMENTED();
+};
+
#if defined(CLEAR_KEY_CDM_USE_FAKE_AUDIO_DECODER)
int64 ClearKeyCdm::CurrentTimeStampInMicroseconds() const {
return output_timestamp_base_in_microseconds_ +
diff --git a/chromium/media/cdm/ppapi/clear_key_cdm.h b/chromium/media/cdm/ppapi/clear_key_cdm.h
index 67637eb8d31..0ec18a101a0 100644
--- a/chromium/media/cdm/ppapi/clear_key_cdm.h
+++ b/chromium/media/cdm/ppapi/clear_key_cdm.h
@@ -14,7 +14,7 @@
#include "base/memory/scoped_ptr.h"
#include "base/synchronization/lock.h"
#include "media/cdm/aes_decryptor.h"
-#include "media/cdm/ppapi/api/content_decryption_module.h"
+#include "media/cdm/ppapi/clear_key_cdm_common.h"
// Enable this to use the fake decoder for testing.
// TODO(tomfinegan): Move fake audio decoder into a separate class.
@@ -28,20 +28,23 @@ class DecoderBuffer;
class FFmpegCdmAudioDecoder;
// Clear key implementation of the cdm::ContentDecryptionModule interface.
-class ClearKeyCdm : public cdm::ContentDecryptionModule {
+class ClearKeyCdm : public ClearKeyCdmInterface {
public:
- explicit ClearKeyCdm(cdm::Host* host);
+ explicit ClearKeyCdm(Host* host, bool is_decrypt_only);
virtual ~ClearKeyCdm();
// ContentDecryptionModule implementation.
virtual cdm::Status GenerateKeyRequest(
- const char* type, int type_size,
- const uint8_t* init_data, int init_data_size) OVERRIDE;
- virtual cdm::Status AddKey(const char* session_id, int session_id_size,
- const uint8_t* key, int key_size,
- const uint8_t* key_id, int key_id_size) OVERRIDE;
+ const char* type, uint32_t type_size,
+ const uint8_t* init_data, uint32_t init_data_size) OVERRIDE;
+ virtual cdm::Status AddKey(const char* session_id,
+ uint32_t session_id_size,
+ const uint8_t* key,
+ uint32_t key_size,
+ const uint8_t* key_id,
+ uint32_t key_id_size) OVERRIDE;
virtual cdm::Status CancelKeyRequest(const char* session_id,
- int session_id_size) OVERRIDE;
+ uint32_t session_id_size) OVERRIDE;
virtual void TimerExpired(void* context) OVERRIDE;
virtual cdm::Status Decrypt(const cdm::InputBuffer& encrypted_buffer,
cdm::DecryptedBlock* decrypted_block) OVERRIDE;
@@ -58,42 +61,57 @@ class ClearKeyCdm : public cdm::ContentDecryptionModule {
const cdm::InputBuffer& encrypted_buffer,
cdm::AudioFrames* audio_frames) OVERRIDE;
virtual void Destroy() OVERRIDE;
+ virtual void OnPlatformChallengeResponse(
+ const cdm::PlatformChallengeResponse& response) OVERRIDE;
+ virtual void OnQueryOutputProtectionStatus(
+ uint32_t link_mask, uint32_t output_protection_mask) OVERRIDE;
private:
// TODO(xhwang): After we removed DecryptorClient. We probably can also remove
// this Client class as well. Investigate this possibility.
class Client {
public:
+ // TODO(jrummell): Remove bitmask and rename kNone to kInvalid once CDM
+ // interface supports session_id passing completely.
enum Status {
- kKeyAdded,
- kKeyError,
- kKeyMessage
+ kNone = 0,
+ kCreated = 1 << 0,
+ kMessage = 1 << 1,
+ kReady = 1 << 2,
+ kClosed = 1 << 3,
+ kError = 1 << 4
};
Client();
virtual ~Client();
Status status() { return status_; }
- const std::string& session_id() { return session_id_; }
- const std::vector<uint8>& key_message() { return key_message_; }
- const std::string& default_url() { return default_url_; }
+ const std::string& web_session_id() { return web_session_id_; }
+ const std::vector<uint8>& message() { return message_; }
+ const std::string& destination_url() { return destination_url_; }
+ MediaKeys::KeyError error_code() { return error_code_; }
+ int system_code() { return system_code_; }
// Resets the Client to a clean state.
void Reset();
- void KeyAdded(const std::string& session_id);
- void KeyError(const std::string& session_id,
- MediaKeys::KeyError error_code,
- int system_code);
- void KeyMessage(const std::string& session_id,
- const std::vector<uint8>& message,
- const std::string& default_url);
+ void OnSessionCreated(uint32 session_id, const std::string& web_session_id);
+ void OnSessionMessage(uint32 session_id,
+ const std::vector<uint8>& message,
+ const std::string& destination_url);
+ void OnSessionReady(uint32 session_id);
+ void OnSessionClosed(uint32 session_id);
+ void OnSessionError(uint32 session_id,
+ MediaKeys::KeyError error_code,
+ int system_code);
private:
Status status_;
- std::string session_id_;
- std::vector<uint8> key_message_;
- std::string default_url_;
+ std::string web_session_id_;
+ std::vector<uint8> message_;
+ std::string destination_url_;
+ MediaKeys::KeyError error_code_;
+ int system_code_;
};
// Prepares next heartbeat message and sets a timer for it.
@@ -132,7 +150,9 @@ class ClearKeyCdm : public cdm::ContentDecryptionModule {
// simultaneously.
base::Lock client_lock_;
- cdm::Host* host_;
+ ClearKeyCdmHost* host_;
+
+ const bool is_decrypt_only_;
std::string heartbeat_session_id_;
std::string next_heartbeat_message_;
diff --git a/chromium/media/cdm/ppapi/clear_key_cdm_common.h b/chromium/media/cdm/ppapi/clear_key_cdm_common.h
new file mode 100644
index 00000000000..8f843974eca
--- /dev/null
+++ b/chromium/media/cdm/ppapi/clear_key_cdm_common.h
@@ -0,0 +1,18 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CDM_PPAPI_CLEAR_KEY_CDM_COMMON_H_
+#define MEDIA_CDM_PPAPI_CLEAR_KEY_CDM_COMMON_H_
+
+#include "media/cdm/ppapi/api/content_decryption_module.h"
+
+namespace media {
+
+// Aliases for the version of the interfaces that this CDM implements.
+typedef cdm::ContentDecryptionModule_2 ClearKeyCdmInterface;
+typedef ClearKeyCdmInterface::Host ClearKeyCdmHost;
+
+} // namespace media
+
+#endif // MEDIA_CDM_PPAPI_CLEAR_KEY_CDM_COMMON_H_
diff --git a/chromium/media/cdm/ppapi/ffmpeg_cdm_audio_decoder.cc b/chromium/media/cdm/ppapi/ffmpeg_cdm_audio_decoder.cc
index ed619b2bef7..082b35eccc6 100644
--- a/chromium/media/cdm/ppapi/ffmpeg_cdm_audio_decoder.cc
+++ b/chromium/media/cdm/ppapi/ffmpeg_cdm_audio_decoder.cc
@@ -12,6 +12,7 @@
#include "media/base/buffers.h"
#include "media/base/data_buffer.h"
#include "media/base/limits.h"
+#include "media/ffmpeg/ffmpeg_common.h"
// Include FFmpeg header files.
extern "C" {
@@ -79,12 +80,59 @@ static void CdmAudioDecoderConfigToAVCodecContext(
}
}
-FFmpegCdmAudioDecoder::FFmpegCdmAudioDecoder(cdm::Host* host)
+static cdm::AudioFormat AVSampleFormatToCdmAudioFormat(
+ AVSampleFormat sample_format) {
+ switch (sample_format) {
+ case AV_SAMPLE_FMT_U8:
+ return cdm::kAudioFormatU8;
+ case AV_SAMPLE_FMT_S16:
+ return cdm::kAudioFormatS16;
+ case AV_SAMPLE_FMT_S32:
+ return cdm::kAudioFormatS32;
+ case AV_SAMPLE_FMT_FLT:
+ return cdm::kAudioFormatF32;
+ case AV_SAMPLE_FMT_S16P:
+ return cdm::kAudioFormatPlanarS16;
+ case AV_SAMPLE_FMT_FLTP:
+ return cdm::kAudioFormatPlanarF32;
+ default:
+ DVLOG(1) << "Unknown AVSampleFormat: " << sample_format;
+ }
+ return cdm::kUnknownAudioFormat;
+}
+
+static void CopySamples(cdm::AudioFormat cdm_format,
+ int decoded_audio_size,
+ const AVFrame& av_frame,
+ uint8_t* output_buffer) {
+ switch (cdm_format) {
+ case cdm::kAudioFormatU8:
+ case cdm::kAudioFormatS16:
+ case cdm::kAudioFormatS32:
+ case cdm::kAudioFormatF32:
+ memcpy(output_buffer, av_frame.data[0], decoded_audio_size);
+ break;
+ case cdm::kAudioFormatPlanarS16:
+ case cdm::kAudioFormatPlanarF32: {
+ const int decoded_size_per_channel =
+ decoded_audio_size / av_frame.channels;
+ for (int i = 0; i < av_frame.channels; ++i) {
+ memcpy(output_buffer,
+ av_frame.extended_data[i],
+ decoded_size_per_channel);
+ output_buffer += decoded_size_per_channel;
+ }
+ break;
+ }
+ default:
+ NOTREACHED() << "Unsupported CDM Audio Format!";
+ memset(output_buffer, 0, decoded_audio_size);
+ }
+}
+
+FFmpegCdmAudioDecoder::FFmpegCdmAudioDecoder(ClearKeyCdmHost* host)
: is_initialized_(false),
host_(host),
- codec_context_(NULL),
- av_frame_(NULL),
- bits_per_channel_(0),
samples_per_second_(0),
channels_(0),
av_sample_format_(0),
@@ -99,7 +147,6 @@ FFmpegCdmAudioDecoder::~FFmpegCdmAudioDecoder() {
bool FFmpegCdmAudioDecoder::Initialize(const cdm::AudioDecoderConfig& config) {
DVLOG(1) << "Initialize()";
-
if (!IsValidConfig(config)) {
LOG(ERROR) << "Initialize(): invalid audio decoder configuration.";
return false;
@@ -111,15 +158,15 @@ bool FFmpegCdmAudioDecoder::Initialize(const cdm::AudioDecoderConfig& config) {
}
// Initialize AVCodecContext structure.
- codec_context_ = avcodec_alloc_context3(NULL);
- CdmAudioDecoderConfigToAVCodecContext(config, codec_context_);
+ codec_context_.reset(avcodec_alloc_context3(NULL));
+ CdmAudioDecoderConfigToAVCodecContext(config, codec_context_.get());
// MP3 decodes to S16P which we don't support, tell it to use S16 instead.
if (codec_context_->sample_fmt == AV_SAMPLE_FMT_S16P)
codec_context_->request_sample_fmt = AV_SAMPLE_FMT_S16;
AVCodec* codec = avcodec_find_decoder(codec_context_->codec_id);
- if (!codec || avcodec_open2(codec_context_, codec, NULL) < 0) {
+ if (!codec || avcodec_open2(codec_context_.get(), codec, NULL) < 0) {
DLOG(ERROR) << "Could not initialize audio decoder: "
<< codec_context_->codec_id;
return false;
@@ -132,27 +179,12 @@ bool FFmpegCdmAudioDecoder::Initialize(const cdm::AudioDecoderConfig& config) {
return false;
}
- // Some codecs will only output float data, so we need to convert to integer
- // before returning the decoded buffer.
- if (codec_context_->sample_fmt == AV_SAMPLE_FMT_FLTP ||
- codec_context_->sample_fmt == AV_SAMPLE_FMT_FLT) {
- // Preallocate the AudioBus for float conversions. We can treat interleaved
- // float data as a single planar channel since our output is expected in an
- // interleaved format anyways.
- int channels = codec_context_->channels;
- if (codec_context_->sample_fmt == AV_SAMPLE_FMT_FLT)
- channels = 1;
- converter_bus_ = AudioBus::CreateWrapper(channels);
- }
-
// Success!
- av_frame_ = avcodec_alloc_frame();
- bits_per_channel_ = config.bits_per_channel;
+ av_frame_.reset(av_frame_alloc());
samples_per_second_ = config.samples_per_second;
- bytes_per_frame_ = codec_context_->channels * bits_per_channel_ / 8;
+ bytes_per_frame_ = codec_context_->channels * config.bits_per_channel / 8;
output_timestamp_helper_.reset(
new AudioTimestampHelper(config.samples_per_second));
- serialized_audio_frames_.reserve(bytes_per_frame_ * samples_per_second_);
is_initialized_ = true;
// Store initial values to guard against midstream configuration changes.
@@ -171,7 +203,7 @@ void FFmpegCdmAudioDecoder::Deinitialize() {
void FFmpegCdmAudioDecoder::Reset() {
DVLOG(1) << "Reset()";
- avcodec_flush_buffers(codec_context_);
+ avcodec_flush_buffers(codec_context_.get());
ResetTimestampState();
}
@@ -227,17 +259,23 @@ cdm::Status FFmpegCdmAudioDecoder::DecodeBuffer(
packet.data = const_cast<uint8_t*>(compressed_buffer);
packet.size = compressed_buffer_size;
+ // Tell the CDM what AudioFormat we're using.
+ const cdm::AudioFormat cdm_format = AVSampleFormatToCdmAudioFormat(
+ static_cast<AVSampleFormat>(av_sample_format_));
+ DCHECK_NE(cdm_format, cdm::kUnknownAudioFormat);
+ decoded_frames->SetFormat(cdm_format);
+
// Each audio packet may contain several frames, so we must call the decoder
// until we've exhausted the packet. Regardless of the packet size we always
// want to hand it to the decoder at least once, otherwise we would end up
// skipping end of stream packets since they have a size of zero.
do {
// Reset frame to default values.
- avcodec_get_frame_defaults(av_frame_);
+ avcodec_get_frame_defaults(av_frame_.get());
int frame_decoded = 0;
int result = avcodec_decode_audio4(
- codec_context_, av_frame_, &frame_decoded, &packet);
+ codec_context_.get(), av_frame_.get(), &frame_decoded, &packet);
if (result < 0) {
DCHECK(!is_end_of_stream)
@@ -290,76 +328,63 @@ cdm::Status FFmpegCdmAudioDecoder::DecodeBuffer(
decoded_audio_size = av_samples_get_buffer_size(
NULL, codec_context_->channels, av_frame_->nb_samples,
codec_context_->sample_fmt, 1);
- // If we're decoding into float, adjust audio size.
- if (converter_bus_ && bits_per_channel_ / 8 != sizeof(float)) {
- DCHECK(codec_context_->sample_fmt == AV_SAMPLE_FMT_FLT ||
- codec_context_->sample_fmt == AV_SAMPLE_FMT_FLTP);
- decoded_audio_size *=
- static_cast<float>(bits_per_channel_ / 8) / sizeof(float);
- }
}
- int start_sample = 0;
if (decoded_audio_size > 0 && output_bytes_to_drop_ > 0) {
DCHECK_EQ(decoded_audio_size % bytes_per_frame_, 0)
<< "Decoder didn't output full frames";
int dropped_size = std::min(decoded_audio_size, output_bytes_to_drop_);
- start_sample = dropped_size / bytes_per_frame_;
decoded_audio_size -= dropped_size;
output_bytes_to_drop_ -= dropped_size;
}
- scoped_refptr<DataBuffer> output;
if (decoded_audio_size > 0) {
DCHECK_EQ(decoded_audio_size % bytes_per_frame_, 0)
<< "Decoder didn't output full frames";
- // Convert float data using an AudioBus.
- if (converter_bus_) {
- // Setup the AudioBus as a wrapper of the AVFrame data and then use
- // AudioBus::ToInterleaved() to convert the data as necessary.
- int skip_frames = start_sample;
- int total_frames = av_frame_->nb_samples;
- int frames_to_interleave = decoded_audio_size / bytes_per_frame_;
- if (codec_context_->sample_fmt == AV_SAMPLE_FMT_FLT) {
- DCHECK_EQ(converter_bus_->channels(), 1);
- total_frames *= codec_context_->channels;
- skip_frames *= codec_context_->channels;
- frames_to_interleave *= codec_context_->channels;
- }
+ base::TimeDelta output_timestamp =
+ output_timestamp_helper_->GetTimestamp();
+ output_timestamp_helper_->AddFrames(decoded_audio_size /
+ bytes_per_frame_);
- converter_bus_->set_frames(total_frames);
- for (int i = 0; i < converter_bus_->channels(); ++i) {
- converter_bus_->SetChannelData(i, reinterpret_cast<float*>(
- av_frame_->extended_data[i]));
+ // If we've exhausted the packet in the first decode we can write directly
+ // into the frame buffer instead of a multistep serialization approach.
+ if (serialized_audio_frames_.empty() && !packet.size) {
+ const uint32_t buffer_size = decoded_audio_size + sizeof(int64) * 2;
+ decoded_frames->SetFrameBuffer(host_->Allocate(buffer_size));
+ if (!decoded_frames->FrameBuffer()) {
+ LOG(ERROR) << "DecodeBuffer() ClearKeyCdmHost::Allocate failed.";
+ return cdm::kDecodeError;
}
+ decoded_frames->FrameBuffer()->SetSize(buffer_size);
+ uint8_t* output_buffer = decoded_frames->FrameBuffer()->Data();
- output = new DataBuffer(decoded_audio_size);
- output->set_data_size(decoded_audio_size);
+ const int64 timestamp = output_timestamp.InMicroseconds();
+ memcpy(output_buffer, &timestamp, sizeof(timestamp));
+ output_buffer += sizeof(timestamp);
- DCHECK_EQ(frames_to_interleave, converter_bus_->frames() - skip_frames);
- converter_bus_->ToInterleavedPartial(
- skip_frames, frames_to_interleave, bits_per_channel_ / 8,
- output->writable_data());
- } else {
- output = DataBuffer::CopyFrom(
- av_frame_->extended_data[0] + start_sample * bytes_per_frame_,
- decoded_audio_size);
- }
+ const int64 output_size = decoded_audio_size;
+ memcpy(output_buffer, &output_size, sizeof(output_size));
+ output_buffer += sizeof(output_size);
- base::TimeDelta output_timestamp =
- output_timestamp_helper_->GetTimestamp();
- output_timestamp_helper_->AddFrames(decoded_audio_size /
- bytes_per_frame_);
+ // Copy the samples and return success.
+ CopySamples(
+ cdm_format, decoded_audio_size, *av_frame_, output_buffer);
+ return cdm::kSuccess;
+ }
- // Serialize the audio samples into |serialized_audio_frames_|.
+ // There are still more frames to decode, so we need to serialize them in
+ // a secondary buffer since we don't know their sizes ahead of time (which
+ // is required to allocate the FrameBuffer object).
SerializeInt64(output_timestamp.InMicroseconds());
- SerializeInt64(output->data_size());
- serialized_audio_frames_.insert(
- serialized_audio_frames_.end(),
- output->data(),
- output->data() + output->data_size());
+ SerializeInt64(decoded_audio_size);
+
+ const size_t previous_size = serialized_audio_frames_.size();
+ serialized_audio_frames_.resize(previous_size + decoded_audio_size);
+ uint8_t* output_buffer = &serialized_audio_frames_[0] + previous_size;
+ CopySamples(
+ cdm_format, decoded_audio_size, *av_frame_, output_buffer);
}
} while (packet.size > 0);
@@ -367,7 +392,7 @@ cdm::Status FFmpegCdmAudioDecoder::DecodeBuffer(
decoded_frames->SetFrameBuffer(
host_->Allocate(serialized_audio_frames_.size()));
if (!decoded_frames->FrameBuffer()) {
- LOG(ERROR) << "DecodeBuffer() cdm::Host::Allocate failed.";
+ LOG(ERROR) << "DecodeBuffer() ClearKeyCdmHost::Allocate failed.";
return cdm::kDecodeError;
}
memcpy(decoded_frames->FrameBuffer()->Data(),
@@ -391,20 +416,12 @@ void FFmpegCdmAudioDecoder::ResetTimestampState() {
void FFmpegCdmAudioDecoder::ReleaseFFmpegResources() {
DVLOG(1) << "ReleaseFFmpegResources()";
- if (codec_context_) {
- av_free(codec_context_->extradata);
- avcodec_close(codec_context_);
- av_free(codec_context_);
- codec_context_ = NULL;
- }
- if (av_frame_) {
- av_free(av_frame_);
- av_frame_ = NULL;
- }
+ codec_context_.reset();
+ av_frame_.reset();
}
void FFmpegCdmAudioDecoder::SerializeInt64(int64 value) {
- int previous_size = serialized_audio_frames_.size();
+ const size_t previous_size = serialized_audio_frames_.size();
serialized_audio_frames_.resize(previous_size + sizeof(value));
memcpy(&serialized_audio_frames_[0] + previous_size, &value, sizeof(value));
}
diff --git a/chromium/media/cdm/ppapi/ffmpeg_cdm_audio_decoder.h b/chromium/media/cdm/ppapi/ffmpeg_cdm_audio_decoder.h
index 1b4fb8f2af5..81362d498fb 100644
--- a/chromium/media/cdm/ppapi/ffmpeg_cdm_audio_decoder.h
+++ b/chromium/media/cdm/ppapi/ffmpeg_cdm_audio_decoder.h
@@ -11,7 +11,7 @@
#include "base/compiler_specific.h"
#include "base/memory/scoped_ptr.h"
#include "base/time/time.h"
-#include "media/cdm/ppapi/api/content_decryption_module.h"
+#include "media/cdm/ppapi/clear_key_cdm_common.h"
struct AVCodecContext;
struct AVFrame;
@@ -19,17 +19,18 @@ struct AVFrame;
namespace media {
class AudioBus;
class AudioTimestampHelper;
+class ScopedPtrAVFreeContext;
+class ScopedPtrAVFreeFrame;
}
namespace media {
-
// TODO(xhwang): This class is partially cloned from FFmpegAudioDecoder. When
// FFmpegAudioDecoder is updated, it's a pain to keep this class in sync with
// FFmpegAudioDecoder. We need a long term sustainable solution for this. See
// http://crbug.com/169203
class FFmpegCdmAudioDecoder {
public:
- explicit FFmpegCdmAudioDecoder(cdm::Host* host);
+ explicit FFmpegCdmAudioDecoder(ClearKeyCdmHost* host);
~FFmpegCdmAudioDecoder();
bool Initialize(const cdm::AudioDecoderConfig& config);
void Deinitialize();
@@ -60,14 +61,13 @@ class FFmpegCdmAudioDecoder {
bool is_initialized_;
- cdm::Host* const host_;
+ ClearKeyCdmHost* const host_;
// FFmpeg structures owned by this object.
- AVCodecContext* codec_context_;
- AVFrame* av_frame_;
+ scoped_ptr_malloc<AVCodecContext, ScopedPtrAVFreeContext> codec_context_;
+ scoped_ptr_malloc<AVFrame, ScopedPtrAVFreeFrame> av_frame_;
// Audio format.
- int bits_per_channel_;
int samples_per_second_;
int channels_;
@@ -79,10 +79,6 @@ class FFmpegCdmAudioDecoder {
int bytes_per_frame_;
base::TimeDelta last_input_timestamp_;
- // We may need to convert the audio data coming out of FFmpeg from planar
- // float to integer.
- scoped_ptr<AudioBus> converter_bus_;
-
// Number of output sample bytes to drop before generating output buffers.
// This is required for handling negative timestamps when decoding Vorbis
// audio, for example.
diff --git a/chromium/media/cdm/ppapi/ffmpeg_cdm_video_decoder.cc b/chromium/media/cdm/ppapi/ffmpeg_cdm_video_decoder.cc
index 9a2439d299c..7ffda24a727 100644
--- a/chromium/media/cdm/ppapi/ffmpeg_cdm_video_decoder.cc
+++ b/chromium/media/cdm/ppapi/ffmpeg_cdm_video_decoder.cc
@@ -8,6 +8,7 @@
#include "base/memory/scoped_ptr.h"
#include "media/base/buffers.h"
#include "media/base/limits.h"
+#include "media/ffmpeg/ffmpeg_common.h"
// Include FFmpeg header files.
extern "C" {
@@ -127,10 +128,8 @@ static void CopyPlane(const uint8_t* source,
}
}
-FFmpegCdmVideoDecoder::FFmpegCdmVideoDecoder(cdm::Host* host)
- : codec_context_(NULL),
- av_frame_(NULL),
- is_initialized_(false),
+FFmpegCdmVideoDecoder::FFmpegCdmVideoDecoder(ClearKeyCdmHost* host)
+ : is_initialized_(false),
host_(host) {
}
@@ -152,8 +151,8 @@ bool FFmpegCdmVideoDecoder::Initialize(const cdm::VideoDecoderConfig& config) {
}
// Initialize AVCodecContext structure.
- codec_context_ = avcodec_alloc_context3(NULL);
- CdmVideoDecoderConfigToAVCodecContext(config, codec_context_);
+ codec_context_.reset(avcodec_alloc_context3(NULL));
+ CdmVideoDecoderConfigToAVCodecContext(config, codec_context_.get());
// Enable motion vector search (potentially slow), strong deblocking filter
// for damaged macroblocks, and set our error detection sensitivity.
@@ -170,12 +169,12 @@ bool FFmpegCdmVideoDecoder::Initialize(const cdm::VideoDecoderConfig& config) {
}
int status;
- if ((status = avcodec_open2(codec_context_, codec, NULL)) < 0) {
+ if ((status = avcodec_open2(codec_context_.get(), codec, NULL)) < 0) {
LOG(ERROR) << "Initialize(): avcodec_open2 failed: " << status;
return false;
}
- av_frame_ = avcodec_alloc_frame();
+ av_frame_.reset(av_frame_alloc());
is_initialized_ = true;
return true;
@@ -189,7 +188,7 @@ void FFmpegCdmVideoDecoder::Deinitialize() {
void FFmpegCdmVideoDecoder::Reset() {
DVLOG(1) << "Reset()";
- avcodec_flush_buffers(codec_context_);
+ avcodec_flush_buffers(codec_context_.get());
}
// static
@@ -223,15 +222,15 @@ cdm::Status FFmpegCdmVideoDecoder::DecodeFrame(
codec_context_->reordered_opaque = timestamp;
// Reset frame to default values.
- avcodec_get_frame_defaults(av_frame_);
+ avcodec_get_frame_defaults(av_frame_.get());
// This is for codecs not using get_buffer to initialize
// |av_frame_->reordered_opaque|
av_frame_->reordered_opaque = codec_context_->reordered_opaque;
int frame_decoded = 0;
- int result = avcodec_decode_video2(codec_context_,
- av_frame_,
+ int result = avcodec_decode_video2(codec_context_.get(),
+ av_frame_.get(),
&frame_decoded,
&packet);
// Log the problem when we can't decode a video frame and exit early.
@@ -276,7 +275,7 @@ bool FFmpegCdmVideoDecoder::CopyAvFrameTo(cdm::VideoFrame* cdm_video_frame) {
DCHECK(!cdm_video_frame->FrameBuffer());
cdm_video_frame->SetFrameBuffer(host_->Allocate(space_required));
if (!cdm_video_frame->FrameBuffer()) {
- LOG(ERROR) << "CopyAvFrameTo() cdm::Host::Allocate failed.";
+ LOG(ERROR) << "CopyAvFrameTo() ClearKeyCdmHost::Allocate failed.";
return false;
}
cdm_video_frame->FrameBuffer()->SetSize(space_required);
@@ -329,16 +328,8 @@ bool FFmpegCdmVideoDecoder::CopyAvFrameTo(cdm::VideoFrame* cdm_video_frame) {
void FFmpegCdmVideoDecoder::ReleaseFFmpegResources() {
DVLOG(1) << "ReleaseFFmpegResources()";
- if (codec_context_) {
- av_free(codec_context_->extradata);
- avcodec_close(codec_context_);
- av_free(codec_context_);
- codec_context_ = NULL;
- }
- if (av_frame_) {
- av_free(av_frame_);
- av_frame_ = NULL;
- }
+ codec_context_.reset();
+ av_frame_.reset();
}
} // namespace media
diff --git a/chromium/media/cdm/ppapi/ffmpeg_cdm_video_decoder.h b/chromium/media/cdm/ppapi/ffmpeg_cdm_video_decoder.h
index 17e2b5783bd..9ce87e6c860 100644
--- a/chromium/media/cdm/ppapi/ffmpeg_cdm_video_decoder.h
+++ b/chromium/media/cdm/ppapi/ffmpeg_cdm_video_decoder.h
@@ -7,17 +7,21 @@
#include "base/basictypes.h"
#include "base/compiler_specific.h"
-#include "media/cdm/ppapi/api/content_decryption_module.h"
+#include "base/memory/scoped_ptr.h"
#include "media/cdm/ppapi/cdm_video_decoder.h"
+#include "media/cdm/ppapi/clear_key_cdm_common.h"
struct AVCodecContext;
struct AVFrame;
namespace media {
+class ScopedPtrAVFreeContext;
+class ScopedPtrAVFreeFrame;
+
class FFmpegCdmVideoDecoder : public CdmVideoDecoder {
public:
- explicit FFmpegCdmVideoDecoder(cdm::Host* host);
+ explicit FFmpegCdmVideoDecoder(ClearKeyCdmHost* host);
virtual ~FFmpegCdmVideoDecoder();
// CdmVideoDecoder implementation.
@@ -43,12 +47,12 @@ class FFmpegCdmVideoDecoder : public CdmVideoDecoder {
void ReleaseFFmpegResources();
// FFmpeg structures owned by this object.
- AVCodecContext* codec_context_;
- AVFrame* av_frame_;
+ scoped_ptr_malloc<AVCodecContext, ScopedPtrAVFreeContext> codec_context_;
+ scoped_ptr_malloc<AVFrame, ScopedPtrAVFreeFrame> av_frame_;
bool is_initialized_;
- cdm::Host* const host_;
+ ClearKeyCdmHost* const host_;
DISALLOW_COPY_AND_ASSIGN(FFmpegCdmVideoDecoder);
};
diff --git a/chromium/media/cdm/ppapi/libvpx_cdm_video_decoder.cc b/chromium/media/cdm/ppapi/libvpx_cdm_video_decoder.cc
index a81c48558d4..cb8f19e9306 100644
--- a/chromium/media/cdm/ppapi/libvpx_cdm_video_decoder.cc
+++ b/chromium/media/cdm/ppapi/libvpx_cdm_video_decoder.cc
@@ -27,7 +27,7 @@ namespace media {
static const int kDecodeThreads = 2;
-LibvpxCdmVideoDecoder::LibvpxCdmVideoDecoder(cdm::Host* host)
+LibvpxCdmVideoDecoder::LibvpxCdmVideoDecoder(CdmHost* host)
: is_initialized_(false),
host_(host),
vpx_codec_(NULL),
@@ -155,7 +155,7 @@ bool LibvpxCdmVideoDecoder::CopyVpxImageTo(cdm::VideoFrame* cdm_video_frame) {
DCHECK(!cdm_video_frame->FrameBuffer());
cdm_video_frame->SetFrameBuffer(host_->Allocate(space_required));
if (!cdm_video_frame->FrameBuffer()) {
- LOG(ERROR) << "CopyVpxImageTo() cdm::Host::Allocate failed.";
+ LOG(ERROR) << "CopyVpxImageTo() CdmHost::Allocate failed.";
return false;
}
cdm_video_frame->FrameBuffer()->SetSize(space_required);
diff --git a/chromium/media/cdm/ppapi/libvpx_cdm_video_decoder.h b/chromium/media/cdm/ppapi/libvpx_cdm_video_decoder.h
index 3edaa076225..d3ad264e638 100644
--- a/chromium/media/cdm/ppapi/libvpx_cdm_video_decoder.h
+++ b/chromium/media/cdm/ppapi/libvpx_cdm_video_decoder.h
@@ -17,7 +17,7 @@ namespace media {
class LibvpxCdmVideoDecoder : public CdmVideoDecoder {
public:
- explicit LibvpxCdmVideoDecoder(cdm::Host* host);
+ explicit LibvpxCdmVideoDecoder(CdmHost* host);
virtual ~LibvpxCdmVideoDecoder();
// CdmVideoDecoder implementation.
@@ -42,7 +42,7 @@ class LibvpxCdmVideoDecoder : public CdmVideoDecoder {
bool is_initialized_;
- cdm::Host* const host_;
+ CdmHost* const host_;
vpx_codec_ctx* vpx_codec_;
vpx_image* vpx_image_;
diff --git a/chromium/media/cdm/ppapi/linked_ptr.h b/chromium/media/cdm/ppapi/linked_ptr.h
index f3eccbbcf0e..1e47a03771d 100644
--- a/chromium/media/cdm/ppapi/linked_ptr.h
+++ b/chromium/media/cdm/ppapi/linked_ptr.h
@@ -115,6 +115,7 @@ class linked_ptr {
capture(ptr);
}
T* get() const { return value_; }
+ operator T*() const { return value_; }
T* operator->() const { return value_; }
T& operator*() const { return *value_; }
// Release ownership of the pointed object and returns it.
diff --git a/chromium/media/cdm/ppapi/supported_cdm_versions.h b/chromium/media/cdm/ppapi/supported_cdm_versions.h
new file mode 100644
index 00000000000..04723d8e5eb
--- /dev/null
+++ b/chromium/media/cdm/ppapi/supported_cdm_versions.h
@@ -0,0 +1,54 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CDM_PPAPI_SUPPORTED_CDM_VERSIONS_H_
+#define MEDIA_CDM_PPAPI_SUPPORTED_CDM_VERSIONS_H_
+
+#include "media/cdm/ppapi/api/content_decryption_module.h"
+
+namespace media {
+
+bool IsSupportedCdmModuleVersion(int version) {
+ switch(version) {
+ // Latest.
+ case CDM_MODULE_VERSION:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool IsSupportedCdmInterfaceVersion(int version) {
+ COMPILE_ASSERT(cdm::ContentDecryptionModule::kVersion ==
+ cdm::ContentDecryptionModule_3::kVersion,
+ update_code_below);
+ switch(version) {
+ // Supported versions in decreasing order.
+ case cdm::ContentDecryptionModule_3::kVersion:
+ case cdm::ContentDecryptionModule_2::kVersion:
+ case cdm::ContentDecryptionModule_1::kVersion:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool IsSupportedCdmHostVersion(int version) {
+ COMPILE_ASSERT(cdm::ContentDecryptionModule::Host::kVersion ==
+ cdm::ContentDecryptionModule_3::Host::kVersion,
+ update_code_below);
+ switch(version) {
+ // Supported versions in decreasing order.
+ case cdm::Host_3::kVersion:
+ case cdm::Host_2::kVersion:
+ case cdm::Host_1::kVersion:
+ return true;
+ default:
+ return false;
+ }
+}
+
+} // namespace media
+
+#endif // MEDIA_CDM_PPAPI_SUPPORTED_CDM_VERSIONS_H_
diff --git a/chromium/media/ffmpeg/ffmpeg_common.cc b/chromium/media/ffmpeg/ffmpeg_common.cc
index 72b31252f85..6e7bd155cd8 100644
--- a/chromium/media/ffmpeg/ffmpeg_common.cc
+++ b/chromium/media/ffmpeg/ffmpeg_common.cc
@@ -6,6 +6,8 @@
#include "base/basictypes.h"
#include "base/logging.h"
+#include "base/metrics/histogram.h"
+#include "base/strings/string_number_conversions.h"
#include "media/base/decoder_buffer.h"
#include "media/base/video_frame.h"
#include "media/base/video_util.h"
@@ -83,6 +85,8 @@ static AudioCodec CodecIDToAudioCodec(AVCodecID codec_id) {
return kCodecAMR_WB;
case AV_CODEC_ID_GSM_MS:
return kCodecGSM_MS;
+ case AV_CODEC_ID_PCM_ALAW:
+ return kCodecPCM_ALAW;
case AV_CODEC_ID_PCM_MULAW:
return kCodecPCM_MULAW;
case AV_CODEC_ID_OPUS:
@@ -128,6 +132,8 @@ static AVCodecID AudioCodecToCodecID(AudioCodec audio_codec,
return AV_CODEC_ID_AMR_WB;
case kCodecGSM_MS:
return AV_CODEC_ID_GSM_MS;
+ case kCodecPCM_ALAW:
+ return AV_CODEC_ID_PCM_ALAW;
case kCodecPCM_MULAW:
return AV_CODEC_ID_PCM_MULAW;
case kCodecOpus:
@@ -280,8 +286,21 @@ static void AVCodecContextToAudioDecoderConfig(
if (codec == kCodecOpus) {
// |codec_context->sample_fmt| is not set by FFmpeg because Opus decoding is
- // not enabled in FFmpeg, so we need to manually set the sample format.
- sample_format = kSampleFormatS16;
+ // not enabled in FFmpeg. It doesn't matter what value is set here, so long
+ // as it's valid, the true sample format is selected inside the decoder.
+ sample_format = kSampleFormatF32;
+ }
+
+ base::TimeDelta seek_preroll;
+ if (codec_context->seek_preroll > 0) {
+ seek_preroll = base::TimeDelta::FromMicroseconds(
+ codec_context->seek_preroll * 1000000.0 / codec_context->sample_rate);
+ }
+
+ base::TimeDelta codec_delay;
+ if (codec_context->delay > 0) {
+ codec_delay = base::TimeDelta::FromMicroseconds(
+ codec_context->delay * 1000000.0 / codec_context->sample_rate);
}
config->Initialize(codec,
@@ -292,8 +311,8 @@ static void AVCodecContextToAudioDecoderConfig(
codec_context->extradata_size,
is_encrypted,
record_stats,
- base::TimeDelta(),
- base::TimeDelta());
+ seek_preroll,
+ codec_delay);
if (codec != kCodecOpus) {
DCHECK_EQ(av_get_bytes_per_sample(codec_context->sample_fmt) * 8,
config->bits_per_channel());
@@ -369,6 +388,12 @@ void AVStreamToVideoDecoderConfig(
gfx::Size natural_size = GetNaturalSize(
visible_rect.size(), aspect_ratio.num, aspect_ratio.den);
+ if (record_stats) {
+ UMA_HISTOGRAM_ENUMERATION("Media.VideoColorRange",
+ stream->codec->color_range,
+ AVCOL_RANGE_NB);
+ }
+
VideoFrame::Format format = PixelFormatToVideoFormat(stream->codec->pix_fmt);
if (codec == kCodecVP9) {
// TODO(tomfinegan): libavcodec doesn't know about VP9.
@@ -489,18 +514,16 @@ VideoFrame::Format PixelFormatToVideoFormat(PixelFormat pixel_format) {
switch (pixel_format) {
case PIX_FMT_YUV422P:
return VideoFrame::YV16;
- // TODO(scherkus): We should be paying attention to the color range of each
- // format and scaling as appropriate when rendering. Regular YUV has a range
- // of 16-239 where as YUVJ has a range of 0-255.
case PIX_FMT_YUV420P:
- case PIX_FMT_YUVJ420P:
return VideoFrame::YV12;
+ case PIX_FMT_YUVJ420P:
+ return VideoFrame::YV12J;
case PIX_FMT_YUVA420P:
return VideoFrame::YV12A;
default:
DVLOG(1) << "Unsupported PixelFormat: " << pixel_format;
}
- return VideoFrame::INVALID;
+ return VideoFrame::UNKNOWN;
}
PixelFormat VideoFormatToPixelFormat(VideoFrame::Format video_format) {
@@ -509,6 +532,8 @@ PixelFormat VideoFormatToPixelFormat(VideoFrame::Format video_format) {
return PIX_FMT_YUV422P;
case VideoFrame::YV12:
return PIX_FMT_YUV420P;
+ case VideoFrame::YV12J:
+ return PIX_FMT_YUVJ420P;
case VideoFrame::YV12A:
return PIX_FMT_YUVA420P;
default:
diff --git a/chromium/media/ffmpeg/ffmpeg_common.h b/chromium/media/ffmpeg/ffmpeg_common.h
index ccd2aa59756..9a98c85aa79 100644
--- a/chromium/media/ffmpeg/ffmpeg_common.h
+++ b/chromium/media/ffmpeg/ffmpeg_common.h
@@ -58,6 +58,28 @@ class ScopedPtrAVFreePacket {
}
};
+// Frees an AVCodecContext object in a class that can be passed as a Deleter
+// argument to scoped_ptr_malloc.
+class ScopedPtrAVFreeContext {
+ public:
+ inline void operator()(void* x) const {
+ AVCodecContext* codec_context = static_cast<AVCodecContext*>(x);
+ av_free(codec_context->extradata);
+ avcodec_close(codec_context);
+ av_free(codec_context);
+ }
+};
+
+// Frees an AVFrame object in a class that can be passed as a Deleter argument
+// to scoped_ptr_malloc.
+class ScopedPtrAVFreeFrame {
+ public:
+ inline void operator()(void* x) const {
+ AVFrame* frame = static_cast<AVFrame*>(x);
+ avcodec_free_frame(&frame);
+ }
+};
+
// Converts an int64 timestamp in |time_base| units to a base::TimeDelta.
// For example if |timestamp| equals 11025 and |time_base| equals {1, 44100}
// then the return value will be a base::TimeDelta for 0.25 seconds since that
diff --git a/chromium/media/ffmpeg/ffmpeg_unittest.cc b/chromium/media/ffmpeg/ffmpeg_unittest.cc
index 255d2aad47f..9f24845dc98 100644
--- a/chromium/media/ffmpeg/ffmpeg_unittest.cc
+++ b/chromium/media/ffmpeg/ffmpeg_unittest.cc
@@ -91,8 +91,8 @@ class FFmpegTest : public testing::TestWithParam<const char*> {
duration_(AV_NOPTS_VALUE) {
InitializeFFmpeg();
- audio_buffer_.reset(avcodec_alloc_frame());
- video_buffer_.reset(avcodec_alloc_frame());
+ audio_buffer_.reset(av_frame_alloc());
+ video_buffer_.reset(av_frame_alloc());
}
virtual ~FFmpegTest() {
@@ -382,8 +382,8 @@ class FFmpegTest : public testing::TestWithParam<const char*> {
AVPacketQueue audio_packets_;
AVPacketQueue video_packets_;
- scoped_ptr_malloc<AVFrame, media::ScopedPtrAVFree> audio_buffer_;
- scoped_ptr_malloc<AVFrame, media::ScopedPtrAVFree> video_buffer_;
+ scoped_ptr_malloc<AVFrame, media::ScopedPtrAVFreeFrame> audio_buffer_;
+ scoped_ptr_malloc<AVFrame, media::ScopedPtrAVFreeFrame> video_buffer_;
int64 decoded_audio_time_;
int64 decoded_audio_duration_;
diff --git a/chromium/media/filters/audio_decoder_selector.cc b/chromium/media/filters/audio_decoder_selector.cc
index 5dbd5e32ca5..a08d3c79c3c 100644
--- a/chromium/media/filters/audio_decoder_selector.cc
+++ b/chromium/media/filters/audio_decoder_selector.cc
@@ -28,13 +28,15 @@ AudioDecoderSelector::AudioDecoderSelector(
weak_ptr_factory_(this) {
}
-AudioDecoderSelector::~AudioDecoderSelector() {}
+AudioDecoderSelector::~AudioDecoderSelector() {
+ DVLOG(2) << __FUNCTION__;
+}
void AudioDecoderSelector::SelectAudioDecoder(
DemuxerStream* stream,
const StatisticsCB& statistics_cb,
const SelectDecoderCB& select_decoder_cb) {
- DVLOG(2) << "SelectAudioDecoder()";
+ DVLOG(2) << __FUNCTION__;
DCHECK(message_loop_->BelongsToCurrentThread());
DCHECK(stream);
@@ -44,8 +46,7 @@ void AudioDecoderSelector::SelectAudioDecoder(
const AudioDecoderConfig& config = stream->audio_decoder_config();
if (!config.IsValidConfig()) {
DLOG(ERROR) << "Invalid audio stream config.";
- base::ResetAndReturn(&select_decoder_cb_).Run(
- scoped_ptr<AudioDecoder>(), scoped_ptr<DecryptingDemuxerStream>());
+ ReturnNullDecoder();
return;
}
@@ -53,14 +54,13 @@ void AudioDecoderSelector::SelectAudioDecoder(
statistics_cb_ = statistics_cb;
if (!config.is_encrypted()) {
- InitializeDecoder(decoders_.begin());
+ InitializeDecoder();
return;
}
// This could happen if Encrypted Media Extension (EME) is not enabled.
if (set_decryptor_ready_cb_.is_null()) {
- base::ResetAndReturn(&select_decoder_cb_).Run(
- scoped_ptr<AudioDecoder>(), scoped_ptr<DecryptingDemuxerStream>());
+ ReturnNullDecoder();
return;
}
@@ -69,14 +69,47 @@ void AudioDecoderSelector::SelectAudioDecoder(
audio_decoder_->Initialize(
input_stream_,
- BindToCurrentLoop(base::Bind(
- &AudioDecoderSelector::DecryptingAudioDecoderInitDone,
- weak_ptr_factory_.GetWeakPtr())),
+ base::Bind(&AudioDecoderSelector::DecryptingAudioDecoderInitDone,
+ weak_ptr_factory_.GetWeakPtr()),
statistics_cb_);
}
+void AudioDecoderSelector::Abort() {
+ DVLOG(2) << __FUNCTION__;
+ DCHECK(message_loop_->BelongsToCurrentThread());
+
+ // This could happen when SelectAudioDecoder() was not called or when
+ // |select_decoder_cb_| was already posted but not fired (e.g. in the
+ // message loop queue).
+ if (select_decoder_cb_.is_null())
+ return;
+
+ // We must be trying to initialize the |audio_decoder_| or the
+ // |decrypted_stream_|. Invalid all weak pointers so that all initialization
+ // callbacks won't fire.
+ weak_ptr_factory_.InvalidateWeakPtrs();
+
+ if (audio_decoder_) {
+ // AudioDecoder doesn't provide a Stop() method. Also, |decrypted_stream_|
+ // is either NULL or already initialized. We don't need to Stop()
+ // |decrypted_stream_| in either case.
+ ReturnNullDecoder();
+ return;
+ }
+
+ if (decrypted_stream_) {
+ decrypted_stream_->Stop(
+ base::Bind(&AudioDecoderSelector::ReturnNullDecoder,
+ weak_ptr_factory_.GetWeakPtr()));
+ return;
+ }
+
+ NOTREACHED();
+}
+
void AudioDecoderSelector::DecryptingAudioDecoderInitDone(
PipelineStatus status) {
+ DVLOG(2) << __FUNCTION__;
DCHECK(message_loop_->BelongsToCurrentThread());
if (status == PIPELINE_OK) {
@@ -92,60 +125,63 @@ void AudioDecoderSelector::DecryptingAudioDecoderInitDone(
decrypted_stream_->Initialize(
input_stream_,
- BindToCurrentLoop(base::Bind(
- &AudioDecoderSelector::DecryptingDemuxerStreamInitDone,
- weak_ptr_factory_.GetWeakPtr())));
+ base::Bind(&AudioDecoderSelector::DecryptingDemuxerStreamInitDone,
+ weak_ptr_factory_.GetWeakPtr()));
}
void AudioDecoderSelector::DecryptingDemuxerStreamInitDone(
PipelineStatus status) {
+ DVLOG(2) << __FUNCTION__;
DCHECK(message_loop_->BelongsToCurrentThread());
if (status != PIPELINE_OK) {
- decrypted_stream_.reset();
- base::ResetAndReturn(&select_decoder_cb_).Run(
- scoped_ptr<AudioDecoder>(), scoped_ptr<DecryptingDemuxerStream>());
+ ReturnNullDecoder();
return;
}
DCHECK(!decrypted_stream_->audio_decoder_config().is_encrypted());
input_stream_ = decrypted_stream_.get();
- InitializeDecoder(decoders_.begin());
+ InitializeDecoder();
}
-void AudioDecoderSelector::InitializeDecoder(
- ScopedVector<AudioDecoder>::iterator iter) {
+void AudioDecoderSelector::InitializeDecoder() {
+ DVLOG(2) << __FUNCTION__;
DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(!audio_decoder_);
- if (iter == decoders_.end()) {
- base::ResetAndReturn(&select_decoder_cb_).Run(
- scoped_ptr<AudioDecoder>(), scoped_ptr<DecryptingDemuxerStream>());
+ if (decoders_.empty()) {
+ ReturnNullDecoder();
return;
}
- (*iter)->Initialize(
- input_stream_,
- BindToCurrentLoop(base::Bind(
- &AudioDecoderSelector::DecoderInitDone,
- weak_ptr_factory_.GetWeakPtr(),
- iter)),
- statistics_cb_);
+ audio_decoder_.reset(decoders_.front());
+ decoders_.weak_erase(decoders_.begin());
+
+ audio_decoder_->Initialize(input_stream_,
+ base::Bind(&AudioDecoderSelector::DecoderInitDone,
+ weak_ptr_factory_.GetWeakPtr()),
+ statistics_cb_);
}
-void AudioDecoderSelector::DecoderInitDone(
- ScopedVector<AudioDecoder>::iterator iter, PipelineStatus status) {
+void AudioDecoderSelector::DecoderInitDone(PipelineStatus status) {
+ DVLOG(2) << __FUNCTION__;
DCHECK(message_loop_->BelongsToCurrentThread());
if (status != PIPELINE_OK) {
- InitializeDecoder(++iter);
+ audio_decoder_.reset();
+ InitializeDecoder();
return;
}
- scoped_ptr<AudioDecoder> audio_decoder(*iter);
- decoders_.weak_erase(iter);
-
- base::ResetAndReturn(&select_decoder_cb_).Run(audio_decoder.Pass(),
+ base::ResetAndReturn(&select_decoder_cb_).Run(audio_decoder_.Pass(),
decrypted_stream_.Pass());
}
+void AudioDecoderSelector::ReturnNullDecoder() {
+ DVLOG(2) << __FUNCTION__;
+ DCHECK(message_loop_->BelongsToCurrentThread());
+ base::ResetAndReturn(&select_decoder_cb_).Run(
+ scoped_ptr<AudioDecoder>(), scoped_ptr<DecryptingDemuxerStream>());
+}
+
} // namespace media
diff --git a/chromium/media/filters/audio_decoder_selector.h b/chromium/media/filters/audio_decoder_selector.h
index b1ed6a291d8..338aa6c91f7 100644
--- a/chromium/media/filters/audio_decoder_selector.h
+++ b/chromium/media/filters/audio_decoder_selector.h
@@ -59,12 +59,16 @@ class MEDIA_EXPORT AudioDecoderSelector {
const StatisticsCB& statistics_cb,
const SelectDecoderCB& select_decoder_cb);
+ // Aborts pending AudioDecoder selection and fires |select_decoder_cb| with
+ // NULL and NULL immediately if it's pending.
+ void Abort();
+
private:
void DecryptingAudioDecoderInitDone(PipelineStatus status);
void DecryptingDemuxerStreamInitDone(PipelineStatus status);
- void InitializeDecoder(ScopedVector<AudioDecoder>::iterator iter);
- void DecoderInitDone(ScopedVector<AudioDecoder>::iterator iter,
- PipelineStatus status);
+ void InitializeDecoder();
+ void DecoderInitDone(PipelineStatus status);
+ void ReturnNullDecoder();
scoped_refptr<base::MessageLoopProxy> message_loop_;
ScopedVector<AudioDecoder> decoders_;
diff --git a/chromium/media/filters/audio_decoder_selector_unittest.cc b/chromium/media/filters/audio_decoder_selector_unittest.cc
index 512502ceae6..be2daff6385 100644
--- a/chromium/media/filters/audio_decoder_selector_unittest.cc
+++ b/chromium/media/filters/audio_decoder_selector_unittest.cc
@@ -25,6 +25,10 @@ class AudioDecoderSelectorTest : public ::testing::Test {
public:
enum DecryptorCapability {
kNoDecryptor,
+ // Used to test Abort() during DecryptingAudioDecoder::Initialize() and
+ // DecryptingDemuxerStream::Initialize(). We don't need this for normal
+ // AudioDecoders since we use MockAudioDecoder.
+ kHoldSetDecryptor,
kDecryptOnly,
kDecryptAndDecode
};
@@ -67,11 +71,14 @@ class AudioDecoderSelectorTest : public ::testing::Test {
void InitializeDecoderSelector(DecryptorCapability decryptor_capability,
int num_decoders) {
SetDecryptorReadyCB set_decryptor_ready_cb;
+ if (decryptor_capability != kNoDecryptor) {
+ set_decryptor_ready_cb =
+ base::Bind(&AudioDecoderSelectorTest::SetDecryptorReadyCallback,
+ base::Unretained(this));
+ }
+
if (decryptor_capability == kDecryptOnly ||
decryptor_capability == kDecryptAndDecode) {
- set_decryptor_ready_cb = base::Bind(
- &AudioDecoderSelectorTest::SetDecryptorReadyCallback,
- base::Unretained(this));
EXPECT_CALL(*this, SetDecryptorReadyCallback(_))
.WillRepeatedly(RunCallback<0>(decryptor_.get()));
@@ -83,6 +90,9 @@ class AudioDecoderSelectorTest : public ::testing::Test {
EXPECT_CALL(*decryptor_, InitializeAudioDecoder(_, _))
.WillRepeatedly(RunCallback<1>(true));
}
+ } else if (decryptor_capability == kHoldSetDecryptor) {
+ // Set DecryptorReadyCB but the callback is never fired.
+ EXPECT_CALL(*this, SetDecryptorReadyCallback(_));
}
DCHECK_GE(all_decoders_.size(), static_cast<size_t>(num_decoders));
@@ -105,6 +115,14 @@ class AudioDecoderSelectorTest : public ::testing::Test {
message_loop_.RunUntilIdle();
}
+ void SelectDecoderAndAbort() {
+ SelectDecoder();
+
+ EXPECT_CALL(*this, OnDecoderSelected(IsNull(), IsNull()));
+ decoder_selector_->Abort();
+ message_loop_.RunUntilIdle();
+ }
+
// Fixture members.
scoped_ptr<AudioDecoderSelector> decoder_selector_;
scoped_ptr<StrictMock<MockDemuxerStream> > demuxer_stream_;
@@ -144,6 +162,16 @@ TEST_F(AudioDecoderSelectorTest, ClearStream_NoDecryptor_OneClearDecoder) {
SelectDecoder();
}
+TEST_F(AudioDecoderSelectorTest,
+ Abort_ClearStream_NoDecryptor_OneClearDecoder) {
+ UseClearStream();
+ InitializeDecoderSelector(kNoDecryptor, 1);
+
+ EXPECT_CALL(*decoder_1_, Initialize(_, _, _));
+
+ SelectDecoderAndAbort();
+}
+
// The stream is not encrypted and we have multiple clear decoders. The first
// decoder that can decode the input stream will be selected.
TEST_F(AudioDecoderSelectorTest, ClearStream_NoDecryptor_MultipleClearDecoder) {
@@ -159,6 +187,18 @@ TEST_F(AudioDecoderSelectorTest, ClearStream_NoDecryptor_MultipleClearDecoder) {
SelectDecoder();
}
+TEST_F(AudioDecoderSelectorTest,
+ Abort_ClearStream_NoDecryptor_MultipleClearDecoder) {
+ UseClearStream();
+ InitializeDecoderSelector(kNoDecryptor, 2);
+
+ EXPECT_CALL(*decoder_1_, Initialize(_, _, _))
+ .WillOnce(RunCallback<1>(DECODER_ERROR_NOT_SUPPORTED));
+ EXPECT_CALL(*decoder_2_, Initialize(_, _, _));
+
+ SelectDecoderAndAbort();
+}
+
// There is a decryptor but the stream is not encrypted. The decoder will be
// selected.
TEST_F(AudioDecoderSelectorTest, ClearStream_HasDecryptor) {
@@ -172,6 +212,15 @@ TEST_F(AudioDecoderSelectorTest, ClearStream_HasDecryptor) {
SelectDecoder();
}
+TEST_F(AudioDecoderSelectorTest, Abort_ClearStream_HasDecryptor) {
+ UseClearStream();
+ InitializeDecoderSelector(kDecryptOnly, 1);
+
+ EXPECT_CALL(*decoder_1_, Initialize(_, _, _));
+
+ SelectDecoderAndAbort();
+}
+
// The stream is encrypted and there's no decryptor. No decoder can be selected.
TEST_F(AudioDecoderSelectorTest, EncryptedStream_NoDecryptor) {
UseEncryptedStream();
@@ -193,6 +242,14 @@ TEST_F(AudioDecoderSelectorTest, EncryptedStream_DecryptOnly_NoClearDecoder) {
SelectDecoder();
}
+TEST_F(AudioDecoderSelectorTest,
+ Abort_EncryptedStream_DecryptOnly_NoClearDecoder) {
+ UseEncryptedStream();
+ InitializeDecoderSelector(kHoldSetDecryptor, 0);
+
+ SelectDecoderAndAbort();
+}
+
// Decryptor can do decryption-only and there's a decoder available. The decoder
// will be selected and a DecryptingDemuxerStream will be created.
TEST_F(AudioDecoderSelectorTest, EncryptedStream_DecryptOnly_OneClearDecoder) {
@@ -206,6 +263,16 @@ TEST_F(AudioDecoderSelectorTest, EncryptedStream_DecryptOnly_OneClearDecoder) {
SelectDecoder();
}
+TEST_F(AudioDecoderSelectorTest,
+ Abort_EncryptedStream_DecryptOnly_OneClearDecoder) {
+ UseEncryptedStream();
+ InitializeDecoderSelector(kDecryptOnly, 1);
+
+ EXPECT_CALL(*decoder_1_, Initialize(_, _, _));
+
+ SelectDecoderAndAbort();
+}
+
// Decryptor can only do decryption and there are multiple decoders available.
// The first decoder that can decode the input stream will be selected and
// a DecryptingDemuxerStream will be created.
@@ -223,6 +290,18 @@ TEST_F(AudioDecoderSelectorTest,
SelectDecoder();
}
+TEST_F(AudioDecoderSelectorTest,
+ Abort_EncryptedStream_DecryptOnly_MultipleClearDecoder) {
+ UseEncryptedStream();
+ InitializeDecoderSelector(kDecryptOnly, 2);
+
+ EXPECT_CALL(*decoder_1_, Initialize(_, _, _))
+ .WillOnce(RunCallback<1>(DECODER_ERROR_NOT_SUPPORTED));
+ EXPECT_CALL(*decoder_2_, Initialize(_, _, _));
+
+ SelectDecoderAndAbort();
+}
+
// Decryptor can do decryption and decoding. A DecryptingAudioDecoder will be
// created and selected. The clear decoders should not be touched at all.
// No DecryptingDemuxerStream should to be created.
@@ -235,4 +314,11 @@ TEST_F(AudioDecoderSelectorTest, EncryptedStream_DecryptAndDecode) {
SelectDecoder();
}
+TEST_F(AudioDecoderSelectorTest, Abort_EncryptedStream_DecryptAndDecode) {
+ UseEncryptedStream();
+ InitializeDecoderSelector(kHoldSetDecryptor, 1);
+
+ SelectDecoderAndAbort();
+}
+
} // namespace media
diff --git a/chromium/media/filters/audio_file_reader.cc b/chromium/media/filters/audio_file_reader.cc
index ba1d5513e69..092c8f5329d 100644
--- a/chromium/media/filters/audio_file_reader.cc
+++ b/chromium/media/filters/audio_file_reader.cc
@@ -114,10 +114,10 @@ bool AudioFileReader::Open() {
}
void AudioFileReader::Close() {
- if (codec_context_) {
- avcodec_close(codec_context_);
- codec_context_ = NULL;
- }
+ // |codec_context_| is a stream inside glue_->format_context(), so it is
+ // closed when |glue_| is disposed.
+ glue_.reset();
+ codec_context_ = NULL;
}
int AudioFileReader::Read(AudioBus* audio_bus) {
@@ -131,7 +131,8 @@ int AudioFileReader::Read(AudioBus* audio_bus) {
size_t bytes_per_sample = av_get_bytes_per_sample(codec_context_->sample_fmt);
// Holds decoded audio.
- scoped_ptr_malloc<AVFrame, ScopedPtrAVFree> av_frame(avcodec_alloc_frame());
+ scoped_ptr_malloc<AVFrame, ScopedPtrAVFreeFrame> av_frame(
+ av_frame_alloc());
// Read until we hit EOF or we've read the requested number of frames.
AVPacket packet;
diff --git a/chromium/media/filters/audio_renderer_algorithm.cc b/chromium/media/filters/audio_renderer_algorithm.cc
index 572e2630a3d..e73ce65569c 100644
--- a/chromium/media/filters/audio_renderer_algorithm.cc
+++ b/chromium/media/filters/audio_renderer_algorithm.cc
@@ -9,9 +9,9 @@
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
-#include "media/audio/audio_util.h"
#include "media/base/audio_buffer.h"
#include "media/base/audio_bus.h"
+#include "media/base/limits.h"
#include "media/filters/wsola_internals.h"
namespace media {
@@ -46,10 +46,6 @@ namespace media {
// |search_block_index_| = |search_block_center_offset_| -
// |search_block_center_offset_|.
-// The maximum size in frames for the |audio_buffer_|. Arbitrarily determined.
-// This number represents 3 seconds of 96kHz/16 bit 7.1 surround sound.
-static const int kMaxBufferSizeInFrames = 3 * 96000;
-
// Max/min supported playback rates for fast/slow audio. Audio outside of these
// ranges are muted.
// Audio at these speeds would sound better under a frequency domain algorithm.
@@ -64,11 +60,18 @@ static const int kOlaWindowSizeMs = 20;
// interval is 2 * delta.
static const int kWsolaSearchIntervalMs = 30;
+// The maximum size in seconds for the |audio_buffer_|. Arbitrarily determined.
+static const int kMaxCapacityInSeconds = 3;
+
// The starting size in frames for |audio_buffer_|. Previous usage maintained a
// queue of 16 AudioBuffers, each of 512 frames. This worked well, so we
// maintain this number of frames.
static const int kStartingBufferSizeInFrames = 16 * 512;
+COMPILE_ASSERT(kStartingBufferSizeInFrames <
+ (kMaxCapacityInSeconds * limits::kMinSampleRate),
+ max_capacity_smaller_than_starting_buffer_size);
+
AudioRendererAlgorithm::AudioRendererAlgorithm()
: channels_(0),
samples_per_second_(0),
@@ -209,6 +212,10 @@ void AudioRendererAlgorithm::FlushBuffers() {
target_block_index_ = 0;
wsola_output_->Zero();
num_complete_frames_ = 0;
+
+ // Reset |capacity_| so growth triggered by underflows doesn't penalize
+ // seek time.
+ capacity_ = kStartingBufferSizeInFrames;
}
base::TimeDelta AudioRendererAlgorithm::GetTime() {
@@ -226,7 +233,10 @@ bool AudioRendererAlgorithm::IsQueueFull() {
}
void AudioRendererAlgorithm::IncreaseQueueCapacity() {
- capacity_ = std::min(2 * capacity_, kMaxBufferSizeInFrames);
+ int max_capacity = kMaxCapacityInSeconds * samples_per_second_;
+ DCHECK_LE(capacity_, max_capacity);
+
+ capacity_ = std::min(2 * capacity_, max_capacity);
}
bool AudioRendererAlgorithm::CanPerformWsola() const {
diff --git a/chromium/media/filters/audio_renderer_algorithm_unittest.cc b/chromium/media/filters/audio_renderer_algorithm_unittest.cc
index 649e0588498..aab4a9d8aca 100644
--- a/chromium/media/filters/audio_renderer_algorithm_unittest.cc
+++ b/chromium/media/filters/audio_renderer_algorithm_unittest.cc
@@ -26,10 +26,9 @@
namespace media {
-static const int kFrameSize = 250;
-static const int kSamplesPerSecond = 3000;
-static const SampleFormat kSampleFormat = kSampleFormatS16;
-static const int kOutputDurationInSec = 10;
+const int kFrameSize = 250;
+const int kSamplesPerSecond = 3000;
+const int kOutputDurationInSec = 10;
static void FillWithSquarePulseTrain(
int half_pulse_width, int offset, int num_samples, float* data) {
diff --git a/chromium/media/filters/audio_renderer_impl.cc b/chromium/media/filters/audio_renderer_impl.cc
index bcf3cb712e0..2df537d8831 100644
--- a/chromium/media/filters/audio_renderer_impl.cc
+++ b/chromium/media/filters/audio_renderer_impl.cc
@@ -14,7 +14,6 @@
#include "base/logging.h"
#include "base/message_loop/message_loop_proxy.h"
#include "base/metrics/histogram.h"
-#include "media/audio/audio_util.h"
#include "media/base/audio_buffer.h"
#include "media/base/audio_splicer.h"
#include "media/base/bind_to_loop.h"
@@ -42,8 +41,7 @@ AudioRendererImpl::AudioRendererImpl(
const scoped_refptr<base::MessageLoopProxy>& message_loop,
media::AudioRendererSink* sink,
ScopedVector<AudioDecoder> decoders,
- const SetDecryptorReadyCB& set_decryptor_ready_cb,
- bool increase_preroll_on_underflow)
+ const SetDecryptorReadyCB& set_decryptor_ready_cb)
: message_loop_(message_loop),
weak_factory_(this),
sink_(sink),
@@ -58,7 +56,6 @@ AudioRendererImpl::AudioRendererImpl(
audio_time_buffered_(kNoTimestamp()),
current_time_(kNoTimestamp()),
underflow_disabled_(false),
- increase_preroll_on_underflow_(increase_preroll_on_underflow),
preroll_aborted_(false) {
}
@@ -71,29 +68,30 @@ AudioRendererImpl::~AudioRendererImpl() {
void AudioRendererImpl::Play(const base::Closure& callback) {
DCHECK(message_loop_->BelongsToCurrentThread());
- {
- base::AutoLock auto_lock(lock_);
- DCHECK_EQ(state_, kPaused);
- state_ = kPlaying;
- callback.Run();
- earliest_end_time_ = now_cb_.Run();
- }
+ base::AutoLock auto_lock(lock_);
+ DCHECK_EQ(state_, kPaused);
+ ChangeState_Locked(kPlaying);
+ callback.Run();
+ earliest_end_time_ = now_cb_.Run();
if (algorithm_->playback_rate() != 0)
- DoPlay();
+ DoPlay_Locked();
else
DCHECK(!sink_playing_);
}
-void AudioRendererImpl::DoPlay() {
+void AudioRendererImpl::DoPlay_Locked() {
DCHECK(message_loop_->BelongsToCurrentThread());
- {
- base::AutoLock auto_lock(lock_);
- earliest_end_time_ = now_cb_.Run();
- }
+ lock_.AssertAcquired();
+ earliest_end_time_ = now_cb_.Run();
+
+ if ((state_ == kPlaying || state_ == kRebuffering || state_ == kUnderflow) &&
+ !sink_playing_) {
+ {
+ base::AutoUnlock auto_unlock(lock_);
+ sink_->Play();
+ }
- if (state_ == kPlaying && !sink_playing_) {
- sink_->Play();
sink_playing_ = true;
}
}
@@ -101,25 +99,25 @@ void AudioRendererImpl::DoPlay() {
void AudioRendererImpl::Pause(const base::Closure& callback) {
DCHECK(message_loop_->BelongsToCurrentThread());
- {
- base::AutoLock auto_lock(lock_);
- DCHECK(state_ == kPlaying || state_ == kUnderflow ||
- state_ == kRebuffering) << "state_ == " << state_;
- pause_cb_ = callback;
- state_ = kPaused;
-
- // Pause only when we've completed our pending read.
- if (!pending_read_)
- base::ResetAndReturn(&pause_cb_).Run();
- }
+ base::AutoLock auto_lock(lock_);
+ DCHECK(state_ == kPlaying || state_ == kUnderflow ||
+ state_ == kRebuffering) << "state_ == " << state_;
+ ChangeState_Locked(kPaused);
- DoPause();
+ DoPause_Locked();
+
+ callback.Run();
}
-void AudioRendererImpl::DoPause() {
+void AudioRendererImpl::DoPause_Locked() {
DCHECK(message_loop_->BelongsToCurrentThread());
+ lock_.AssertAcquired();
+
if (sink_playing_) {
- sink_->Pause();
+ {
+ base::AutoUnlock auto_unlock(lock_);
+ sink_->Pause();
+ }
sink_playing_ = false;
}
}
@@ -127,18 +125,61 @@ void AudioRendererImpl::DoPause() {
void AudioRendererImpl::Flush(const base::Closure& callback) {
DCHECK(message_loop_->BelongsToCurrentThread());
+ base::AutoLock auto_lock(lock_);
+ DCHECK_EQ(state_, kPaused);
+ DCHECK(flush_cb_.is_null());
+
+ flush_cb_ = callback;
+
+ if (pending_read_) {
+ ChangeState_Locked(kFlushing);
+ return;
+ }
+
+ DoFlush_Locked();
+}
+
+void AudioRendererImpl::DoFlush_Locked() {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+ lock_.AssertAcquired();
+
+ DCHECK(!pending_read_);
+ DCHECK_EQ(state_, kPaused);
+
if (decrypting_demuxer_stream_) {
- decrypting_demuxer_stream_->Reset(base::Bind(
- &AudioRendererImpl::ResetDecoder, weak_this_, callback));
+ decrypting_demuxer_stream_->Reset(BindToCurrentLoop(
+ base::Bind(&AudioRendererImpl::ResetDecoder, weak_this_)));
return;
}
- decoder_->Reset(callback);
+ ResetDecoder();
}
-void AudioRendererImpl::ResetDecoder(const base::Closure& callback) {
+void AudioRendererImpl::ResetDecoder() {
DCHECK(message_loop_->BelongsToCurrentThread());
- decoder_->Reset(callback);
+ decoder_->Reset(BindToCurrentLoop(
+ base::Bind(&AudioRendererImpl::ResetDecoderDone, weak_this_)));
+}
+
+void AudioRendererImpl::ResetDecoderDone() {
+ base::AutoLock auto_lock(lock_);
+ if (state_ == kStopped)
+ return;
+
+ DCHECK_EQ(state_, kPaused);
+ DCHECK(!flush_cb_.is_null());
+
+ audio_time_buffered_ = kNoTimestamp();
+ current_time_ = kNoTimestamp();
+ received_end_of_stream_ = false;
+ rendered_end_of_stream_ = false;
+ preroll_aborted_ = false;
+
+ earliest_end_time_ = now_cb_.Run();
+ splicer_->Reset();
+ algorithm_->FlushBuffers();
+
+ base::ResetAndReturn(&flush_cb_).Run();
}
void AudioRendererImpl::Stop(const base::Closure& callback) {
@@ -148,18 +189,19 @@ void AudioRendererImpl::Stop(const base::Closure& callback) {
// TODO(scherkus): Consider invalidating |weak_factory_| and replacing
// task-running guards that check |state_| with DCHECK().
- if (sink_.get()) {
+ if (sink_) {
sink_->Stop();
sink_ = NULL;
}
{
base::AutoLock auto_lock(lock_);
- state_ = kStopped;
+ ChangeState_Locked(kStopped);
algorithm_.reset(NULL);
init_cb_.Reset();
underflow_cb_.Reset();
time_cb_.Reset();
+ flush_cb_.Reset();
}
callback.Run();
@@ -173,24 +215,12 @@ void AudioRendererImpl::Preroll(base::TimeDelta time,
DCHECK(!sink_playing_);
DCHECK_EQ(state_, kPaused);
DCHECK(!pending_read_) << "Pending read must complete before seeking";
- DCHECK(pause_cb_.is_null());
DCHECK(preroll_cb_.is_null());
- state_ = kPrerolling;
+ ChangeState_Locked(kPrerolling);
preroll_cb_ = cb;
preroll_timestamp_ = time;
- // Throw away everything and schedule our reads.
- audio_time_buffered_ = kNoTimestamp();
- current_time_ = kNoTimestamp();
- received_end_of_stream_ = false;
- rendered_end_of_stream_ = false;
- preroll_aborted_ = false;
-
- splicer_->Reset();
- algorithm_->FlushBuffers();
- earliest_end_time_ = now_cb_.Run();
-
AttemptRead_Locked();
}
@@ -213,7 +243,7 @@ void AudioRendererImpl::Initialize(DemuxerStream* stream,
DCHECK(!disabled_cb.is_null());
DCHECK(!error_cb.is_null());
DCHECK_EQ(kUninitialized, state_);
- DCHECK(sink_.get());
+ DCHECK(sink_);
weak_this_ = weak_factory_.GetWeakPtr();
init_cb_ = init_cb;
@@ -234,10 +264,12 @@ void AudioRendererImpl::OnDecoderSelected(
scoped_ptr<AudioDecoder> decoder,
scoped_ptr<DecryptingDemuxerStream> decrypting_demuxer_stream) {
DCHECK(message_loop_->BelongsToCurrentThread());
+
+ base::AutoLock auto_lock(lock_);
scoped_ptr<AudioDecoderSelector> deleter(decoder_selector_.Pass());
if (state_ == kStopped) {
- DCHECK(!sink_.get());
+ DCHECK(!sink_);
return;
}
@@ -269,15 +301,19 @@ void AudioRendererImpl::OnDecoderSelected(
algorithm_.reset(new AudioRendererAlgorithm());
algorithm_->Initialize(0, audio_parameters_);
- state_ = kPaused;
+ ChangeState_Locked(kPaused);
HistogramRendererEvent(INITIALIZED);
- sink_->Initialize(audio_parameters_, weak_this_.get());
- sink_->Start();
+ {
+ base::AutoUnlock auto_unlock(lock_);
+ sink_->Initialize(audio_parameters_, weak_this_.get());
+ sink_->Start();
+
+ // Some sinks play on start...
+ sink_->Pause();
+ }
- // Some sinks play on start...
- sink_->Pause();
DCHECK(!sink_playing_);
base::ResetAndReturn(&init_cb_).Run(PIPELINE_OK);
@@ -287,33 +323,33 @@ void AudioRendererImpl::ResumeAfterUnderflow() {
DCHECK(message_loop_->BelongsToCurrentThread());
base::AutoLock auto_lock(lock_);
if (state_ == kUnderflow) {
- // The "&& preroll_aborted_" is a hack. If preroll is aborted, then we
+ // The "!preroll_aborted_" is a hack. If preroll is aborted, then we
// shouldn't even reach the kUnderflow state to begin with. But for now
// we're just making sure that the audio buffer capacity (i.e. the
// number of bytes that need to be buffered for preroll to complete)
// does not increase due to an aborted preroll.
// TODO(vrk): Fix this bug correctly! (crbug.com/151352)
- if (increase_preroll_on_underflow_ && !preroll_aborted_)
+ if (!preroll_aborted_)
algorithm_->IncreaseQueueCapacity();
- state_ = kRebuffering;
+ ChangeState_Locked(kRebuffering);
}
}
void AudioRendererImpl::SetVolume(float volume) {
DCHECK(message_loop_->BelongsToCurrentThread());
- DCHECK(sink_.get());
+ DCHECK(sink_);
sink_->SetVolume(volume);
}
void AudioRendererImpl::DecodedAudioReady(
AudioDecoder::Status status,
const scoped_refptr<AudioBuffer>& buffer) {
+ DVLOG(1) << __FUNCTION__ << "(" << status << ")";
DCHECK(message_loop_->BelongsToCurrentThread());
base::AutoLock auto_lock(lock_);
- DCHECK(state_ == kPaused || state_ == kPrerolling || state_ == kPlaying ||
- state_ == kUnderflow || state_ == kRebuffering || state_ == kStopped);
+ DCHECK(state_ != kUninitialized);
CHECK(pending_read_);
pending_read_ = false;
@@ -331,6 +367,12 @@ void AudioRendererImpl::DecodedAudioReady(
DCHECK_EQ(status, AudioDecoder::kOk);
DCHECK(buffer.get());
+ if (state_ == kFlushing) {
+ ChangeState_Locked(kPaused);
+ DoFlush_Locked();
+ return;
+ }
+
if (!splicer_->AddInput(buffer)) {
HandleAbortedReadOrDecodeError(true);
return;
@@ -359,37 +401,56 @@ bool AudioRendererImpl::HandleSplicerBuffer(
// Transition to kPlaying if we are currently handling an underflow since
// no more data will be arriving.
if (state_ == kUnderflow || state_ == kRebuffering)
- state_ = kPlaying;
+ ChangeState_Locked(kPlaying);
+ } else {
+ if (state_ == kPrerolling) {
+ if (IsBeforePrerollTime(buffer))
+ return true;
+
+ // Trim off any additional time before the preroll timestamp.
+ const base::TimeDelta trim_time =
+ preroll_timestamp_ - buffer->timestamp();
+ if (trim_time > base::TimeDelta()) {
+ buffer->TrimStart(buffer->frame_count() *
+ (static_cast<double>(trim_time.InMicroseconds()) /
+ buffer->duration().InMicroseconds()));
+ }
+ // If the entire buffer was trimmed, request a new one.
+ if (!buffer->frame_count())
+ return true;
+ }
+
+ if (state_ != kUninitialized && state_ != kStopped)
+ algorithm_->EnqueueBuffer(buffer);
}
switch (state_) {
case kUninitialized:
+ case kFlushing:
NOTREACHED();
return false;
+
case kPaused:
- if (!buffer->end_of_stream())
- algorithm_->EnqueueBuffer(buffer);
DCHECK(!pending_read_);
- base::ResetAndReturn(&pause_cb_).Run();
return false;
+
case kPrerolling:
- if (IsBeforePrerollTime(buffer))
+ if (!buffer->end_of_stream() && !algorithm_->IsQueueFull())
return true;
-
- if (!buffer->end_of_stream()) {
- algorithm_->EnqueueBuffer(buffer);
- if (!algorithm_->IsQueueFull())
- return false;
- }
- state_ = kPaused;
+ ChangeState_Locked(kPaused);
base::ResetAndReturn(&preroll_cb_).Run(PIPELINE_OK);
return false;
+
case kPlaying:
case kUnderflow:
+ return false;
+
case kRebuffering:
- if (!buffer->end_of_stream())
- algorithm_->EnqueueBuffer(buffer);
+ if (!algorithm_->IsQueueFull())
+ return true;
+ ChangeState_Locked(kPlaying);
return false;
+
case kStopped:
return false;
}
@@ -418,6 +479,7 @@ bool AudioRendererImpl::CanRead_Locked() {
switch (state_) {
case kUninitialized:
case kPaused:
+ case kFlushing:
case kStopped:
return false;
@@ -433,46 +495,41 @@ bool AudioRendererImpl::CanRead_Locked() {
}
void AudioRendererImpl::SetPlaybackRate(float playback_rate) {
+ DVLOG(1) << __FUNCTION__ << "(" << playback_rate << ")";
DCHECK(message_loop_->BelongsToCurrentThread());
DCHECK_GE(playback_rate, 0);
- DCHECK(sink_.get());
+ DCHECK(sink_);
+
+ base::AutoLock auto_lock(lock_);
// We have two cases here:
// Play: current_playback_rate == 0 && playback_rate != 0
// Pause: current_playback_rate != 0 && playback_rate == 0
float current_playback_rate = algorithm_->playback_rate();
if (current_playback_rate == 0 && playback_rate != 0)
- DoPlay();
+ DoPlay_Locked();
else if (current_playback_rate != 0 && playback_rate == 0)
- DoPause();
+ DoPause_Locked();
- base::AutoLock auto_lock(lock_);
algorithm_->SetPlaybackRate(playback_rate);
}
bool AudioRendererImpl::IsBeforePrerollTime(
const scoped_refptr<AudioBuffer>& buffer) {
- return (state_ == kPrerolling) && buffer.get() && !buffer->end_of_stream() &&
+ DCHECK_EQ(state_, kPrerolling);
+ return buffer && !buffer->end_of_stream() &&
(buffer->timestamp() + buffer->duration()) < preroll_timestamp_;
}
int AudioRendererImpl::Render(AudioBus* audio_bus,
int audio_delay_milliseconds) {
- int frames_filled =
- FillBuffer(audio_bus, audio_bus->frames(), audio_delay_milliseconds);
- DCHECK_LE(frames_filled, audio_bus->frames());
- return frames_filled;
-}
-
-uint32 AudioRendererImpl::FillBuffer(AudioBus* dest,
- uint32 requested_frames,
- int audio_delay_milliseconds) {
+ const int requested_frames = audio_bus->frames();
base::TimeDelta current_time = kNoTimestamp();
base::TimeDelta max_time = kNoTimestamp();
base::TimeDelta playback_delay = base::TimeDelta::FromMilliseconds(
audio_delay_milliseconds);
- size_t frames_written = 0;
+ int frames_written = 0;
base::Closure underflow_cb;
{
base::AutoLock auto_lock(lock_);
@@ -485,9 +542,6 @@ uint32 AudioRendererImpl::FillBuffer(AudioBus* dest,
if (playback_rate == 0)
return 0;
- if (state_ == kRebuffering && algorithm_->IsQueueFull())
- state_ = kPlaying;
-
// Mute audio by returning 0 when not playing.
if (state_ != kPlaying)
return 0;
@@ -507,7 +561,7 @@ uint32 AudioRendererImpl::FillBuffer(AudioBus* dest,
// 3) We are in the kPlaying state
//
// Otherwise the buffer has data we can send to the device.
- frames_written = algorithm_->FillBuffer(dest, requested_frames);
+ frames_written = algorithm_->FillBuffer(audio_bus, requested_frames);
if (frames_written == 0) {
const base::TimeTicks now = now_cb_.Run();
@@ -517,7 +571,7 @@ uint32 AudioRendererImpl::FillBuffer(AudioBus* dest,
ended_cb_.Run();
} else if (!received_end_of_stream_ && state_ == kPlaying &&
!underflow_disabled_) {
- state_ = kUnderflow;
+ ChangeState_Locked(kUnderflow);
underflow_cb = underflow_cb_;
} else {
// We can't write any data this cycle. For example, we may have
@@ -568,25 +622,26 @@ uint32 AudioRendererImpl::FillBuffer(AudioBus* dest,
max_time = algorithm_->GetTime();
audio_time_buffered_ = max_time;
- UpdateEarliestEndTime_Locked(
- frames_written, playback_delay, now_cb_.Run());
+ if (frames_written > 0) {
+ UpdateEarliestEndTime_Locked(
+ frames_written, playback_delay, now_cb_.Run());
+ }
}
- if (current_time != kNoTimestamp() && max_time != kNoTimestamp()) {
+ if (current_time != kNoTimestamp() && max_time != kNoTimestamp())
time_cb_.Run(current_time, max_time);
- }
if (!underflow_cb.is_null())
underflow_cb.Run();
+ DCHECK_LE(frames_written, requested_frames);
return frames_written;
}
void AudioRendererImpl::UpdateEarliestEndTime_Locked(
int frames_filled, const base::TimeDelta& playback_delay,
const base::TimeTicks& time_now) {
- if (frames_filled <= 0)
- return;
+ DCHECK_GT(frames_filled, 0);
base::TimeDelta predicted_play_time = base::TimeDelta::FromMicroseconds(
static_cast<float>(frames_filled) * base::Time::kMicrosecondsPerSecond /
@@ -607,6 +662,8 @@ void AudioRendererImpl::DisableUnderflowForTesting() {
}
void AudioRendererImpl::HandleAbortedReadOrDecodeError(bool is_decode_error) {
+ lock_.AssertAcquired();
+
PipelineStatus status = is_decode_error ? PIPELINE_ERROR_DECODE : PIPELINE_OK;
switch (state_) {
case kUninitialized:
@@ -615,12 +672,22 @@ void AudioRendererImpl::HandleAbortedReadOrDecodeError(bool is_decode_error) {
case kPaused:
if (status != PIPELINE_OK)
error_cb_.Run(status);
- base::ResetAndReturn(&pause_cb_).Run();
+ return;
+ case kFlushing:
+ ChangeState_Locked(kPaused);
+
+ if (status == PIPELINE_OK) {
+ DoFlush_Locked();
+ return;
+ }
+
+ error_cb_.Run(status);
+ base::ResetAndReturn(&flush_cb_).Run();
return;
case kPrerolling:
// This is a signal for abort if it's not an error.
preroll_aborted_ = !is_decode_error;
- state_ = kPaused;
+ ChangeState_Locked(kPaused);
base::ResetAndReturn(&preroll_cb_).Run(status);
return;
case kPlaying:
@@ -633,4 +700,10 @@ void AudioRendererImpl::HandleAbortedReadOrDecodeError(bool is_decode_error) {
}
}
+void AudioRendererImpl::ChangeState_Locked(State new_state) {
+ DVLOG(1) << __FUNCTION__ << " : " << state_ << " -> " << new_state;
+ lock_.AssertAcquired();
+ state_ = new_state;
+}
+
} // namespace media
diff --git a/chromium/media/filters/audio_renderer_impl.h b/chromium/media/filters/audio_renderer_impl.h
index 56501fd3bcb..335a6c7a454 100644
--- a/chromium/media/filters/audio_renderer_impl.h
+++ b/chromium/media/filters/audio_renderer_impl.h
@@ -53,14 +53,10 @@ class MEDIA_EXPORT AudioRendererImpl
//
// |set_decryptor_ready_cb| is fired when the audio decryptor is available
// (only applicable if the stream is encrypted and we have a decryptor).
- //
- // |increase_preroll_on_underflow| Set to true if the preroll duration
- // should be increased when ResumeAfterUnderflow() is called.
AudioRendererImpl(const scoped_refptr<base::MessageLoopProxy>& message_loop,
AudioRendererSink* sink,
ScopedVector<AudioDecoder> decoders,
- const SetDecryptorReadyCB& set_decryptor_ready_cb,
- bool increase_preroll_on_underflow);
+ const SetDecryptorReadyCB& set_decryptor_ready_cb);
virtual ~AudioRendererImpl();
// AudioRenderer implementation.
@@ -97,6 +93,18 @@ class MEDIA_EXPORT AudioRendererImpl
private:
friend class AudioRendererImplTest;
+ // TODO(acolwell): Add a state machine graph.
+ enum State {
+ kUninitialized,
+ kPaused,
+ kFlushing,
+ kPrerolling,
+ kPlaying,
+ kStopped,
+ kUnderflow,
+ kRebuffering,
+ };
+
// Callback from the audio decoder delivering decoded audio samples.
void DecodedAudioReady(AudioDecoder::Status status,
const scoped_refptr<AudioBuffer>& buffer);
@@ -109,38 +117,33 @@ class MEDIA_EXPORT AudioRendererImpl
// DecodedAudioReady().
void HandleAbortedReadOrDecodeError(bool is_decode_error);
- // Fills the given buffer with audio data by delegating to its |algorithm_|.
- // FillBuffer() also takes care of updating the clock. Returns the number of
- // frames copied into |dest|, which may be less than or equal to
- // |requested_frames|.
- //
- // If this method returns fewer frames than |requested_frames|, it could
- // be a sign that the pipeline is stalled or unable to stream the data fast
- // enough. In such scenarios, the callee should zero out unused portions
- // of their buffer to playback silence.
- //
- // FillBuffer() updates the pipeline's playback timestamp. If FillBuffer() is
- // not called at the same rate as audio samples are played, then the reported
- // timestamp in the pipeline will be ahead of the actual audio playback. In
- // this case |playback_delay| should be used to indicate when in the future
- // should the filled buffer be played.
- //
- // Safe to call on any thread.
- uint32 FillBuffer(AudioBus* dest,
- uint32 requested_frames,
- int audio_delay_milliseconds);
-
// Estimate earliest time when current buffer can stop playing.
void UpdateEarliestEndTime_Locked(int frames_filled,
const base::TimeDelta& playback_delay,
const base::TimeTicks& time_now);
- void DoPlay();
- void DoPause();
+ void DoPlay_Locked();
+ void DoPause_Locked();
// AudioRendererSink::RenderCallback implementation.
//
// NOTE: These are called on the audio callback thread!
+ //
+ // Render() fills the given buffer with audio data by delegating to its
+ // |algorithm_|. Render() also takes care of updating the clock.
+ // Returns the number of frames copied into |audio_bus|, which may be less
+ // than or equal to the initial number of frames in |audio_bus|
+ //
+ // If this method returns fewer frames than the initial number of frames in
+ // |audio_bus|, it could be a sign that the pipeline is stalled or unable to
+ // stream the data fast enough. In such scenarios, the callee should zero out
+ // unused portions of their buffer to play back silence.
+ //
+ // Render() updates the pipeline's playback timestamp. If Render() is
+ // not called at the same rate as audio samples are played, then the reported
+ // timestamp in the pipeline will be ahead of the actual audio playback. In
+ // this case |audio_delay_milliseconds| should be used to indicate when in the
+ // future should the filled buffer be played.
virtual int Render(AudioBus* audio_bus,
int audio_delay_milliseconds) OVERRIDE;
virtual void OnRenderError() OVERRIDE;
@@ -152,6 +155,7 @@ class MEDIA_EXPORT AudioRendererImpl
void AttemptRead();
void AttemptRead_Locked();
bool CanRead_Locked();
+ void ChangeState_Locked(State new_state);
// Returns true if the data in the buffer is all before
// |preroll_timestamp_|. This can only return true while
@@ -167,7 +171,16 @@ class MEDIA_EXPORT AudioRendererImpl
scoped_ptr<AudioDecoder> decoder,
scoped_ptr<DecryptingDemuxerStream> decrypting_demuxer_stream);
- void ResetDecoder(const base::Closure& callback);
+ // Used to initiate the flush operation once all pending reads have
+ // completed.
+ void DoFlush_Locked();
+
+ // Calls |decoder_|.Reset() and arranges for ResetDecoderDone() to get
+ // called when the reset completes.
+ void ResetDecoder();
+
+ // Called when the |decoder_|.Reset() has completed.
+ void ResetDecoderDone();
scoped_refptr<base::MessageLoopProxy> message_loop_;
base::WeakPtrFactory<AudioRendererImpl> weak_factory_;
@@ -198,8 +211,8 @@ class MEDIA_EXPORT AudioRendererImpl
base::Closure disabled_cb_;
PipelineStatusCB error_cb_;
- // Callback provided to Pause().
- base::Closure pause_cb_;
+ // Callback provided to Flush().
+ base::Closure flush_cb_;
// Callback provided to Preroll().
PipelineStatusCB preroll_cb_;
@@ -215,15 +228,6 @@ class MEDIA_EXPORT AudioRendererImpl
scoped_ptr<AudioRendererAlgorithm> algorithm_;
// Simple state tracking variable.
- enum State {
- kUninitialized,
- kPaused,
- kPrerolling,
- kPlaying,
- kStopped,
- kUnderflow,
- kRebuffering,
- };
State state_;
// Keep track of whether or not the sink is playing.
@@ -261,7 +265,6 @@ class MEDIA_EXPORT AudioRendererImpl
size_t total_frames_filled_;
bool underflow_disabled_;
- bool increase_preroll_on_underflow_;
// True if the renderer receives a buffer with kAborted status during preroll,
// false otherwise. This flag is cleared on the next Preroll() call.
diff --git a/chromium/media/filters/audio_renderer_impl_unittest.cc b/chromium/media/filters/audio_renderer_impl_unittest.cc
index 9e7f3e074ed..5adfbc499f7 100644
--- a/chromium/media/filters/audio_renderer_impl_unittest.cc
+++ b/chromium/media/filters/audio_renderer_impl_unittest.cc
@@ -11,8 +11,8 @@
#include "base/strings/stringprintf.h"
#include "media/base/audio_buffer.h"
#include "media/base/audio_timestamp_helper.h"
+#include "media/base/fake_audio_renderer_sink.h"
#include "media/base/gmock_callback_support.h"
-#include "media/base/mock_audio_renderer_sink.h"
#include "media/base/mock_filters.h"
#include "media/base/test_helpers.h"
#include "media/filters/audio_renderer_impl.h"
@@ -25,8 +25,6 @@ using ::testing::_;
using ::testing::AnyNumber;
using ::testing::Invoke;
using ::testing::Return;
-using ::testing::NiceMock;
-using ::testing::StrictMock;
namespace media {
@@ -42,6 +40,8 @@ static int kSamplesPerSecond = 44100;
static float kMutedAudio = 0.0f;
static float kPlayingAudio = 0.5f;
+static const int kDataSize = 1024;
+
class AudioRendererImplTest : public ::testing::Test {
public:
// Give the decoder some non-garbage media properties.
@@ -61,6 +61,9 @@ class AudioRendererImplTest : public ::testing::Test {
EXPECT_CALL(*decoder_, Read(_))
.WillRepeatedly(Invoke(this, &AudioRendererImplTest::ReadDecoder));
+ EXPECT_CALL(*decoder_, Reset(_))
+ .WillRepeatedly(Invoke(this, &AudioRendererImplTest::ResetDecoder));
+
// Set up audio properties.
EXPECT_CALL(*decoder_, bits_per_channel())
.WillRepeatedly(Return(audio_config.bits_per_channel()));
@@ -71,13 +74,12 @@ class AudioRendererImplTest : public ::testing::Test {
ScopedVector<AudioDecoder> decoders;
decoders.push_back(decoder_);
-
+ sink_ = new FakeAudioRendererSink();
renderer_.reset(new AudioRendererImpl(
message_loop_.message_loop_proxy(),
- new NiceMock<MockAudioRendererSink>(),
+ sink_,
decoders.Pass(),
- SetDecryptorReadyCB(),
- false));
+ SetDecryptorReadyCB()));
// Stub out time.
renderer_->set_now_cb_for_testing(base::Bind(
@@ -119,7 +121,6 @@ class AudioRendererImplTest : public ::testing::Test {
void Initialize() {
EXPECT_CALL(*decoder_, Initialize(_, _, _))
.WillOnce(RunCallback<1>(PIPELINE_OK));
-
InitializeWithStatus(PIPELINE_OK);
next_timestamp_.reset(
@@ -150,6 +151,14 @@ class AudioRendererImplTest : public ::testing::Test {
EXPECT_TRUE(read_cb_.is_null());
}
+ void Flush() {
+ WaitableMessageLoopEvent flush_event;
+ renderer_->Flush(flush_event.GetClosure());
+ flush_event.RunAndWait();
+
+ EXPECT_FALSE(IsReadPending());
+ }
+
void Preroll() {
Preroll(0, PIPELINE_OK);
}
@@ -179,11 +188,29 @@ class AudioRendererImplTest : public ::testing::Test {
event.RunAndWait();
}
+ void Pause() {
+ WaitableMessageLoopEvent pause_event;
+ renderer_->Pause(pause_event.GetClosure());
+ pause_event.RunAndWait();
+ }
+
+ void Seek() {
+ Pause();
+
+ Flush();
+
+ Preroll();
+ }
+
void WaitForEnded() {
SCOPED_TRACE("WaitForEnded()");
ended_event_.RunAndWait();
}
+ bool IsReadPending() const {
+ return !read_cb_.is_null();
+ }
+
void WaitForPendingRead() {
SCOPED_TRACE("WaitForPendingRead()");
if (!read_cb_.is_null())
@@ -200,7 +227,8 @@ class AudioRendererImplTest : public ::testing::Test {
}
// Delivers |size| frames with value kPlayingAudio to |renderer_|.
- void SatisfyPendingRead(size_t size) {
+ void SatisfyPendingRead(int size) {
+ CHECK_GT(size, 0);
CHECK(!read_cb_.is_null());
scoped_refptr<AudioBuffer> buffer =
@@ -236,10 +264,15 @@ class AudioRendererImplTest : public ::testing::Test {
//
// |muted| is optional and if passed will get set if the value of
// the consumed data is muted audio.
- bool ConsumeBufferedData(uint32 requested_frames, bool* muted) {
+ bool ConsumeBufferedData(int requested_frames, bool* muted) {
scoped_ptr<AudioBus> bus =
- AudioBus::Create(kChannels, std::max(requested_frames, 1u));
- uint32 frames_read = renderer_->FillBuffer(bus.get(), requested_frames, 0);
+ AudioBus::Create(kChannels, std::max(requested_frames, 1));
+ int frames_read;
+ if (!sink_->Render(bus.get(), 0, &frames_read)) {
+ if (muted)
+ *muted = true;
+ return false;
+ }
if (muted)
*muted = frames_read < 1 || bus->channel(0)[0] == kMutedAudio;
@@ -255,31 +288,30 @@ class AudioRendererImplTest : public ::testing::Test {
int frames_read = 0;
int total_frames_read = 0;
- const int kRequestFrames = 1024;
- scoped_ptr<AudioBus> bus = AudioBus::Create(kChannels, kRequestFrames);
+ scoped_ptr<AudioBus> bus = AudioBus::Create(kChannels, 1024);
do {
TimeDelta audio_delay = TimeDelta::FromMicroseconds(
total_frames_read * Time::kMicrosecondsPerSecond /
static_cast<float>(decoder_->samples_per_second()));
- frames_read = renderer_->FillBuffer(
- bus.get(), kRequestFrames, audio_delay.InMilliseconds());
+ frames_read = renderer_->Render(
+ bus.get(), audio_delay.InMilliseconds());
total_frames_read += frames_read;
} while (frames_read > 0);
return total_frames_read;
}
- uint32 frames_buffered() {
+ int frames_buffered() {
return renderer_->algorithm_->frames_buffered();
}
- uint32 buffer_capacity() {
+ int buffer_capacity() {
return renderer_->algorithm_->QueueCapacity();
}
- uint32 frames_remaining_in_buffer() {
+ int frames_remaining_in_buffer() {
// This can happen if too much data was delivered, in which case the buffer
// will accept the data but not increase capacity.
if (frames_buffered() > buffer_capacity()) {
@@ -342,6 +374,7 @@ class AudioRendererImplTest : public ::testing::Test {
// Fixture members.
base::MessageLoop message_loop_;
scoped_ptr<AudioRendererImpl> renderer_;
+ scoped_refptr<FakeAudioRendererSink> sink_;
private:
TimeTicks GetTime() {
@@ -366,6 +399,13 @@ class AudioRendererImplTest : public ::testing::Test {
base::ResetAndReturn(&wait_for_pending_read_cb_).Run();
}
+ void ResetDecoder(const base::Closure& reset_cb) {
+ CHECK(read_cb_.is_null())
+ << "Reset overlapping with reads is not permitted";
+
+ message_loop_.PostTask(FROM_HERE, reset_cb);
+ }
+
void DeliverBuffer(AudioDecoder::Status status,
const scoped_refptr<AudioBuffer>& buffer) {
CHECK(!read_cb_.is_null());
@@ -435,6 +475,9 @@ TEST_F(AudioRendererImplTest, EndOfStream_SlowerPlaybackSpeed) {
TEST_F(AudioRendererImplTest, Underflow) {
Initialize();
Preroll();
+
+ int initial_capacity = buffer_capacity();
+
Play();
// Drain internal buffer, we should have a pending read.
@@ -443,7 +486,6 @@ TEST_F(AudioRendererImplTest, Underflow) {
// Verify the next FillBuffer() call triggers the underflow callback
// since the decoder hasn't delivered any data after it was drained.
- const size_t kDataSize = 1024;
EXPECT_CALL(*this, OnUnderflow());
EXPECT_FALSE(ConsumeBufferedData(kDataSize, NULL));
@@ -451,16 +493,50 @@ TEST_F(AudioRendererImplTest, Underflow) {
// Verify after resuming that we're still not getting data.
bool muted = false;
- EXPECT_EQ(0u, frames_buffered());
+ EXPECT_EQ(0, frames_buffered());
EXPECT_FALSE(ConsumeBufferedData(kDataSize, &muted));
EXPECT_TRUE(muted);
+ // Verify that the buffer capacity increased as a result of the underflow.
+ EXPECT_GT(buffer_capacity(), initial_capacity);
+
// Deliver data, we should get non-muted audio.
DeliverRemainingAudio();
EXPECT_TRUE(ConsumeBufferedData(kDataSize, &muted));
EXPECT_FALSE(muted);
}
+TEST_F(AudioRendererImplTest, Underflow_FollowedByFlush) {
+ Initialize();
+ Preroll();
+
+ int initial_capacity = buffer_capacity();
+
+ Play();
+
+ // Drain internal buffer, we should have a pending read.
+ EXPECT_TRUE(ConsumeBufferedData(frames_buffered(), NULL));
+ WaitForPendingRead();
+
+ // Verify the next FillBuffer() call triggers the underflow callback
+ // since the decoder hasn't delivered any data after it was drained.
+ EXPECT_CALL(*this, OnUnderflow());
+ EXPECT_FALSE(ConsumeBufferedData(kDataSize, NULL));
+
+ renderer_->ResumeAfterUnderflow();
+
+ // Verify that the buffer capacity increased as a result of the underflow.
+ EXPECT_GT(buffer_capacity(), initial_capacity);
+
+ // Deliver data to get the renderer out of the underflow/rebuffer state.
+ DeliverRemainingAudio();
+
+ Seek();
+
+ // Verify that the buffer capacity is restored to the |initial_capacity|.
+ EXPECT_EQ(buffer_capacity(), initial_capacity);
+}
+
TEST_F(AudioRendererImplTest, Underflow_EndOfStream) {
Initialize();
Preroll();
@@ -478,7 +554,6 @@ TEST_F(AudioRendererImplTest, Underflow_EndOfStream) {
// Verify the next FillBuffer() call triggers the underflow callback
// since the decoder hasn't delivered any data after it was drained.
- const size_t kDataSize = 1024;
EXPECT_CALL(*this, OnUnderflow());
EXPECT_FALSE(ConsumeBufferedData(kDataSize, NULL));
@@ -516,14 +591,13 @@ TEST_F(AudioRendererImplTest, Underflow_ResumeFromCallback) {
// Verify the next FillBuffer() call triggers the underflow callback
// since the decoder hasn't delivered any data after it was drained.
- const size_t kDataSize = 1024;
EXPECT_CALL(*this, OnUnderflow())
.WillOnce(Invoke(this, &AudioRendererImplTest::CallResumeAfterUnderflow));
EXPECT_FALSE(ConsumeBufferedData(kDataSize, NULL));
// Verify after resuming that we're still not getting data.
bool muted = false;
- EXPECT_EQ(0u, frames_buffered());
+ EXPECT_EQ(0, frames_buffered());
EXPECT_FALSE(ConsumeBufferedData(kDataSize, &muted));
EXPECT_TRUE(muted);
@@ -533,6 +607,73 @@ TEST_F(AudioRendererImplTest, Underflow_ResumeFromCallback) {
EXPECT_FALSE(muted);
}
+TEST_F(AudioRendererImplTest, Underflow_SetPlaybackRate) {
+ Initialize();
+ Preroll();
+ Play();
+
+ // Drain internal buffer, we should have a pending read.
+ EXPECT_TRUE(ConsumeBufferedData(frames_buffered(), NULL));
+ WaitForPendingRead();
+
+ EXPECT_EQ(FakeAudioRendererSink::kPlaying, sink_->state());
+
+ // Verify the next FillBuffer() call triggers the underflow callback
+ // since the decoder hasn't delivered any data after it was drained.
+ EXPECT_CALL(*this, OnUnderflow())
+ .WillOnce(Invoke(this, &AudioRendererImplTest::CallResumeAfterUnderflow));
+ EXPECT_FALSE(ConsumeBufferedData(kDataSize, NULL));
+ EXPECT_EQ(0, frames_buffered());
+
+ EXPECT_EQ(FakeAudioRendererSink::kPlaying, sink_->state());
+
+ // Simulate playback being paused.
+ renderer_->SetPlaybackRate(0);
+
+ EXPECT_EQ(FakeAudioRendererSink::kPaused, sink_->state());
+
+ // Deliver data to resolve the underflow.
+ DeliverRemainingAudio();
+
+ EXPECT_EQ(FakeAudioRendererSink::kPaused, sink_->state());
+
+ // Simulate playback being resumed.
+ renderer_->SetPlaybackRate(1);
+
+ EXPECT_EQ(FakeAudioRendererSink::kPlaying, sink_->state());
+}
+
+TEST_F(AudioRendererImplTest, Underflow_PausePlay) {
+ Initialize();
+ Preroll();
+ Play();
+
+ // Drain internal buffer, we should have a pending read.
+ EXPECT_TRUE(ConsumeBufferedData(frames_buffered(), NULL));
+ WaitForPendingRead();
+
+ EXPECT_EQ(FakeAudioRendererSink::kPlaying, sink_->state());
+
+ // Verify the next FillBuffer() call triggers the underflow callback
+ // since the decoder hasn't delivered any data after it was drained.
+ EXPECT_CALL(*this, OnUnderflow())
+ .WillOnce(Invoke(this, &AudioRendererImplTest::CallResumeAfterUnderflow));
+ EXPECT_FALSE(ConsumeBufferedData(kDataSize, NULL));
+ EXPECT_EQ(0, frames_buffered());
+
+ EXPECT_EQ(FakeAudioRendererSink::kPlaying, sink_->state());
+
+ // Simulate playback being paused, and then played again.
+ renderer_->SetPlaybackRate(0.0);
+ renderer_->SetPlaybackRate(1.0);
+
+ // Deliver data to resolve the underflow.
+ DeliverRemainingAudio();
+
+ // We should have resumed playing now.
+ EXPECT_EQ(FakeAudioRendererSink::kPlaying, sink_->state());
+}
+
TEST_F(AudioRendererImplTest, AbortPendingRead_Preroll) {
Initialize();
@@ -545,6 +686,8 @@ TEST_F(AudioRendererImplTest, AbortPendingRead_Preroll) {
AbortPendingRead();
event.RunAndWaitForStatus(PIPELINE_OK);
+ Flush();
+
// Preroll again to a different timestamp and verify it completed normally.
Preroll(1000, PIPELINE_OK);
}
@@ -567,8 +710,118 @@ TEST_F(AudioRendererImplTest, AbortPendingRead_Pause) {
AbortPendingRead();
event.RunAndWait();
+ Flush();
+
+ // Preroll again to a different timestamp and verify it completed normally.
+ Preroll(1000, PIPELINE_OK);
+}
+
+
+TEST_F(AudioRendererImplTest, AbortPendingRead_Flush) {
+ Initialize();
+
+ Preroll();
+ Play();
+
+ // Partially drain internal buffer so we get a pending read.
+ EXPECT_TRUE(ConsumeBufferedData(frames_buffered() / 2, NULL));
+ WaitForPendingRead();
+
+ Pause();
+
+ EXPECT_TRUE(IsReadPending());
+
+ // Start flushing.
+ WaitableMessageLoopEvent flush_event;
+ renderer_->Flush(flush_event.GetClosure());
+
+ // Simulate the decoder aborting the pending read.
+ AbortPendingRead();
+ flush_event.RunAndWait();
+
+ EXPECT_FALSE(IsReadPending());
+
+ // Preroll again to a different timestamp and verify it completed normally.
+ Preroll(1000, PIPELINE_OK);
+}
+
+TEST_F(AudioRendererImplTest, PendingRead_Pause) {
+ Initialize();
+
+ Preroll();
+ Play();
+
+ // Partially drain internal buffer so we get a pending read.
+ EXPECT_TRUE(ConsumeBufferedData(frames_buffered() / 2, NULL));
+ WaitForPendingRead();
+
+ // Start pausing.
+ WaitableMessageLoopEvent event;
+ renderer_->Pause(event.GetClosure());
+
+ SatisfyPendingRead(kDataSize);
+
+ event.RunAndWait();
+
+ Flush();
+
// Preroll again to a different timestamp and verify it completed normally.
Preroll(1000, PIPELINE_OK);
}
+
+TEST_F(AudioRendererImplTest, PendingRead_Flush) {
+ Initialize();
+
+ Preroll();
+ Play();
+
+ // Partially drain internal buffer so we get a pending read.
+ EXPECT_TRUE(ConsumeBufferedData(frames_buffered() / 2, NULL));
+ WaitForPendingRead();
+
+ Pause();
+
+ EXPECT_TRUE(IsReadPending());
+
+ // Start flushing.
+ WaitableMessageLoopEvent flush_event;
+ renderer_->Flush(flush_event.GetClosure());
+
+ SatisfyPendingRead(kDataSize);
+
+ flush_event.RunAndWait();
+
+ EXPECT_FALSE(IsReadPending());
+
+ // Preroll again to a different timestamp and verify it completed normally.
+ Preroll(1000, PIPELINE_OK);
+}
+
+TEST_F(AudioRendererImplTest, StopDuringFlush) {
+ Initialize();
+
+ Preroll();
+ Play();
+
+ // Partially drain internal buffer so we get a pending read.
+ EXPECT_TRUE(ConsumeBufferedData(frames_buffered() / 2, NULL));
+ WaitForPendingRead();
+
+ Pause();
+
+ EXPECT_TRUE(IsReadPending());
+
+ // Start flushing.
+ WaitableMessageLoopEvent flush_event;
+ renderer_->Flush(flush_event.GetClosure());
+
+ SatisfyPendingRead(kDataSize);
+
+ // Request a Stop() before the flush completes.
+ WaitableMessageLoopEvent stop_event;
+ renderer_->Stop(stop_event.GetClosure());
+ stop_event.RunAndWait();
+}
+
} // namespace media
diff --git a/chromium/media/filters/chunk_demuxer.cc b/chromium/media/filters/chunk_demuxer.cc
index a4a67f2d640..57ee3f95a9e 100644
--- a/chromium/media/filters/chunk_demuxer.cc
+++ b/chromium/media/filters/chunk_demuxer.cc
@@ -12,12 +12,12 @@
#include "base/callback_helpers.h"
#include "base/location.h"
#include "base/message_loop/message_loop_proxy.h"
+#include "base/stl_util.h"
#include "media/base/audio_decoder_config.h"
#include "media/base/bind_to_loop.h"
#include "media/base/stream_parser_buffer.h"
#include "media/base/video_decoder_config.h"
#include "media/filters/stream_parser_factory.h"
-#include "media/webm/webm_webvtt_parser.h"
using base::TimeDelta;
@@ -35,16 +35,20 @@ class SourceState {
typedef base::Callback<void(
TimeDelta, ChunkDemuxerStream*)> IncreaseDurationCB;
+ typedef base::Callback<void(
+ ChunkDemuxerStream*, const TextTrackConfig&)> NewTextTrackCB;
+
SourceState(scoped_ptr<StreamParser> stream_parser, const LogCB& log_cb,
const CreateDemuxerStreamCB& create_demuxer_stream_cb,
const IncreaseDurationCB& increase_duration_cb);
+ ~SourceState();
+
void Init(const StreamParser::InitCB& init_cb,
bool allow_audio,
bool allow_video,
- const StreamParser::NewTextBuffersCB& text_cb,
const StreamParser::NeedKeyCB& need_key_cb,
- const AddTextTrackCB& add_text_track_cb);
+ const NewTextTrackCB& new_text_track_cb);
// Appends new data to the StreamParser.
// Returns true if the data was successfully appended. Returns false if an
@@ -66,6 +70,11 @@ class SourceState {
}
void set_append_window_end(TimeDelta end) { append_window_end_ = end; }
+ void StartReturningData();
+ void AbortReads();
+ void Seek(TimeDelta seek_time);
+ void CompletePendingReadIfPossible();
+
private:
// Called by the |stream_parser_| when a new initialization segment is
// encountered.
@@ -73,7 +82,8 @@ class SourceState {
// processing decoder configurations.
bool OnNewConfigs(bool allow_audio, bool allow_video,
const AudioDecoderConfig& audio_config,
- const VideoDecoderConfig& video_config);
+ const VideoDecoderConfig& video_config,
+ const StreamParser::TextTrackConfigMap& text_configs);
// Called by the |stream_parser_| at the beginning of a new media segment.
void OnNewMediaSegment();
@@ -91,12 +101,12 @@ class SourceState {
const StreamParser::BufferQueue& video_buffers);
// Called by the |stream_parser_| when new text buffers have been parsed. It
- // applies |timestamp_offset_| to all buffers in |buffers| and then calls
- // |new_buffers_cb| with the modified buffers.
+ // applies |timestamp_offset_| to all buffers in |buffers| and then appends
+ // the (modified) buffers to the demuxer stream associated with
+ // the track having |text_track_number|.
// Returns true on a successful call. Returns false if an error occured while
// processing the buffers.
- bool OnTextBuffers(const StreamParser::NewTextBuffersCB& new_buffers_cb,
- TextTrack* text_track,
+ bool OnTextBuffers(int text_track_number,
const StreamParser::BufferQueue& buffers);
// Helper function that adds |timestamp_offset_| to each buffer in |buffers|.
@@ -115,6 +125,7 @@ class SourceState {
CreateDemuxerStreamCB create_demuxer_stream_cb_;
IncreaseDurationCB increase_duration_cb_;
+ NewTextTrackCB new_text_track_cb_;
// The offset to apply to media segment timestamps.
TimeDelta timestamp_offset_;
@@ -142,6 +153,9 @@ class SourceState {
ChunkDemuxerStream* video_;
bool video_needs_keyframe_;
+ typedef std::map<int, ChunkDemuxerStream*> TextStreamMap;
+ TextStreamMap text_stream_map_;
+
LogCB log_cb_;
DISALLOW_COPY_AND_ASSIGN(SourceState);
@@ -193,6 +207,7 @@ class ChunkDemuxerStream : public DemuxerStream {
// Returns false if the new config should trigger an error.
bool UpdateAudioConfig(const AudioDecoderConfig& config, const LogCB& log_cb);
bool UpdateVideoConfig(const VideoDecoderConfig& config, const LogCB& log_cb);
+ void UpdateTextConfig(const TextTrackConfig& config, const LogCB& log_cb);
void MarkEndOfStream();
void UnmarkEndOfStream();
@@ -204,6 +219,10 @@ class ChunkDemuxerStream : public DemuxerStream {
virtual AudioDecoderConfig audio_decoder_config() OVERRIDE;
virtual VideoDecoderConfig video_decoder_config() OVERRIDE;
+ // Returns the text track configuration. It is an error to call this method
+ // if type() != TEXT.
+ TextTrackConfig text_track_config();
+
void set_memory_limit_for_testing(int memory_limit) {
stream_->set_memory_limit_for_testing(memory_limit);
}
@@ -227,7 +246,7 @@ class ChunkDemuxerStream : public DemuxerStream {
bool GetNextBuffer_Locked(DemuxerStream::Status* status,
scoped_refptr<StreamParserBuffer>* buffer);
- // Specifies the type of the stream (must be AUDIO or VIDEO for now).
+ // Specifies the type of the stream.
Type type_;
scoped_ptr<SourceBufferStream> stream_;
@@ -258,13 +277,33 @@ SourceState::SourceState(scoped_ptr<StreamParser> stream_parser,
DCHECK(!increase_duration_cb_.is_null());
}
+SourceState::~SourceState() {
+ if (audio_)
+ audio_->Shutdown();
+
+ if (video_)
+ video_->Shutdown();
+
+ for (TextStreamMap::iterator itr = text_stream_map_.begin();
+ itr != text_stream_map_.end(); ++itr) {
+ itr->second->Shutdown();
+ delete itr->second;
+ }
+}
+
void SourceState::Init(const StreamParser::InitCB& init_cb,
bool allow_audio,
bool allow_video,
- const StreamParser::NewTextBuffersCB& text_cb,
const StreamParser::NeedKeyCB& need_key_cb,
- const AddTextTrackCB& add_text_track_cb) {
- StreamParser::NewBuffersCB audio_cb;
+ const NewTextTrackCB& new_text_track_cb) {
+ new_text_track_cb_ = new_text_track_cb;
+
+ StreamParser::NewTextBuffersCB new_text_buffers_cb;
+
+ if (!new_text_track_cb_.is_null()) {
+ new_text_buffers_cb = base::Bind(&SourceState::OnTextBuffers,
+ base::Unretained(this));
+ }
stream_parser_->Init(init_cb,
base::Bind(&SourceState::OnNewConfigs,
@@ -273,10 +312,8 @@ void SourceState::Init(const StreamParser::InitCB& init_cb,
allow_video),
base::Bind(&SourceState::OnNewBuffers,
base::Unretained(this)),
- base::Bind(&SourceState::OnTextBuffers,
- base::Unretained(this), text_cb),
+ new_text_buffers_cb,
need_key_cb,
- add_text_track_cb,
base::Bind(&SourceState::OnNewMediaSegment,
base::Unretained(this)),
base::Bind(&SourceState::OnEndOfMediaSegment,
@@ -303,6 +340,59 @@ void SourceState::Abort() {
can_update_offset_ = true;
}
+
+void SourceState::StartReturningData() {
+ if (audio_)
+ audio_->StartReturningData();
+
+ if (video_)
+ video_->StartReturningData();
+
+ for (TextStreamMap::iterator itr = text_stream_map_.begin();
+ itr != text_stream_map_.end(); ++itr) {
+ itr->second->StartReturningData();
+ }
+}
+
+void SourceState::AbortReads() {
+ if (audio_)
+ audio_->AbortReads();
+
+ if (video_)
+ video_->AbortReads();
+
+ for (TextStreamMap::iterator itr = text_stream_map_.begin();
+ itr != text_stream_map_.end(); ++itr) {
+ itr->second->AbortReads();
+ }
+}
+
+void SourceState::Seek(TimeDelta seek_time) {
+ if (audio_)
+ audio_->Seek(seek_time);
+
+ if (video_)
+ video_->Seek(seek_time);
+
+ for (TextStreamMap::iterator itr = text_stream_map_.begin();
+ itr != text_stream_map_.end(); ++itr) {
+ itr->second->Seek(seek_time);
+ }
+}
+
+void SourceState::CompletePendingReadIfPossible() {
+ if (audio_)
+ audio_->CompletePendingReadIfPossible();
+
+ if (video_)
+ video_->CompletePendingReadIfPossible();
+
+ for (TextStreamMap::iterator itr = text_stream_map_.begin();
+ itr != text_stream_map_.end(); ++itr) {
+ itr->second->CompletePendingReadIfPossible();
+ }
+}
+
void SourceState::AdjustBufferTimestamps(
const StreamParser::BufferQueue& buffers) {
if (timestamp_offset_ == TimeDelta())
@@ -316,9 +406,11 @@ void SourceState::AdjustBufferTimestamps(
}
}
-bool SourceState::OnNewConfigs(bool allow_audio, bool allow_video,
- const AudioDecoderConfig& audio_config,
- const VideoDecoderConfig& video_config) {
+bool SourceState::OnNewConfigs(
+ bool allow_audio, bool allow_video,
+ const AudioDecoderConfig& audio_config,
+ const VideoDecoderConfig& video_config,
+ const StreamParser::TextTrackConfigMap& text_configs) {
DVLOG(1) << "OnNewConfigs(" << allow_audio << ", " << allow_video
<< ", " << audio_config.IsValidConfig()
<< ", " << video_config.IsValidConfig() << ")";
@@ -377,6 +469,61 @@ bool SourceState::OnNewConfigs(bool allow_audio, bool allow_video,
success &= video_->UpdateVideoConfig(video_config, log_cb_);
}
+ typedef StreamParser::TextTrackConfigMap::const_iterator TextConfigItr;
+ if (text_stream_map_.empty()) {
+ for (TextConfigItr itr = text_configs.begin();
+ itr != text_configs.end(); ++itr) {
+ ChunkDemuxerStream* const text_stream =
+ create_demuxer_stream_cb_.Run(DemuxerStream::TEXT);
+ text_stream->UpdateTextConfig(itr->second, log_cb_);
+ text_stream_map_[itr->first] = text_stream;
+ new_text_track_cb_.Run(text_stream, itr->second);
+ }
+ } else {
+ const size_t text_count = text_stream_map_.size();
+ if (text_configs.size() != text_count) {
+ success &= false;
+ MEDIA_LOG(log_cb_) << "The number of text track configs changed.";
+ } else if (text_count == 1) {
+ TextConfigItr config_itr = text_configs.begin();
+ const TextTrackConfig& new_config = config_itr->second;
+ TextStreamMap::iterator stream_itr = text_stream_map_.begin();
+ ChunkDemuxerStream* text_stream = stream_itr->second;
+ TextTrackConfig old_config = text_stream->text_track_config();
+ if (!new_config.Matches(old_config)) {
+ success &= false;
+ MEDIA_LOG(log_cb_) << "New text track config does not match old one.";
+ } else {
+ text_stream_map_.clear();
+ text_stream_map_[config_itr->first] = text_stream;
+ }
+ } else {
+ for (TextConfigItr config_itr = text_configs.begin();
+ config_itr != text_configs.end(); ++config_itr) {
+ TextStreamMap::iterator stream_itr =
+ text_stream_map_.find(config_itr->first);
+ if (stream_itr == text_stream_map_.end()) {
+ success &= false;
+ MEDIA_LOG(log_cb_) << "Unexpected text track configuration "
+ "for track ID "
+ << config_itr->first;
+ break;
+ }
+
+ const TextTrackConfig& new_config = config_itr->second;
+ ChunkDemuxerStream* stream = stream_itr->second;
+ TextTrackConfig old_config = stream->text_track_config();
+ if (!new_config.Matches(old_config)) {
+ success &= false;
+ MEDIA_LOG(log_cb_) << "New text track config for track ID "
+ << config_itr->first
+ << " does not match old one.";
+ break;
+ }
+ }
+ }
+ }
+
DVLOG(1) << "OnNewConfigs() : " << (success ? "success" : "failed");
return success;
}
@@ -432,6 +579,11 @@ bool SourceState::OnNewBuffers(const StreamParser::BufferQueue& audio_buffers,
if (video_)
video_->OnNewMediaSegment(segment_timestamp);
+
+ for (TextStreamMap::iterator itr = text_stream_map_.begin();
+ itr != text_stream_map_.end(); ++itr) {
+ itr->second->OnNewMediaSegment(segment_timestamp);
+ }
}
if (!filtered_audio.empty()) {
@@ -450,15 +602,17 @@ bool SourceState::OnNewBuffers(const StreamParser::BufferQueue& audio_buffers,
}
bool SourceState::OnTextBuffers(
- const StreamParser::NewTextBuffersCB& new_buffers_cb,
- TextTrack* text_track,
+ int text_track_number,
const StreamParser::BufferQueue& buffers) {
- if (new_buffers_cb.is_null())
+ DCHECK(!buffers.empty());
+
+ TextStreamMap::iterator itr = text_stream_map_.find(text_track_number);
+ if (itr == text_stream_map_.end())
return false;
AdjustBufferTimestamps(buffers);
- return new_buffers_cb.Run(text_track, buffers);
+ return itr->second->Append(buffers);
}
void SourceState::FilterWithAppendWindow(
@@ -563,7 +717,8 @@ void ChunkDemuxerStream::Seek(TimeDelta time) {
DVLOG(1) << "ChunkDemuxerStream::Seek(" << time.InSecondsF() << ")";
base::AutoLock auto_lock(lock_);
DCHECK(read_cb_.is_null());
- DCHECK(state_ == UNINITIALIZED || state_ == RETURNING_ABORT_FOR_READS);
+ DCHECK(state_ == UNINITIALIZED || state_ == RETURNING_ABORT_FOR_READS)
+ << state_;
stream_->Seek(time);
}
@@ -648,6 +803,15 @@ bool ChunkDemuxerStream::UpdateVideoConfig(const VideoDecoderConfig& config,
return stream_->UpdateVideoConfig(config);
}
+void ChunkDemuxerStream::UpdateTextConfig(const TextTrackConfig& config,
+ const LogCB& log_cb) {
+ DCHECK_EQ(type_, TEXT);
+ base::AutoLock auto_lock(lock_);
+ DCHECK(!stream_);
+ DCHECK_EQ(state_, UNINITIALIZED);
+ stream_.reset(new SourceBufferStream(config, log_cb));
+}
+
void ChunkDemuxerStream::MarkEndOfStream() {
base::AutoLock auto_lock(lock_);
stream_->MarkEndOfStream();
@@ -684,6 +848,12 @@ VideoDecoderConfig ChunkDemuxerStream::video_decoder_config() {
return stream_->GetCurrentVideoDecoderConfig();
}
+TextTrackConfig ChunkDemuxerStream::text_track_config() {
+ CHECK_EQ(type_, TEXT);
+ base::AutoLock auto_lock(lock_);
+ return stream_->GetCurrentTextTrackConfig();
+}
+
void ChunkDemuxerStream::ChangeState_Locked(State state) {
lock_.AssertAcquired();
DVLOG(1) << "ChunkDemuxerStream::ChangeState_Locked() : "
@@ -743,14 +913,13 @@ void ChunkDemuxerStream::CompletePendingReadIfPossible_Locked() {
ChunkDemuxer::ChunkDemuxer(const base::Closure& open_cb,
const NeedKeyCB& need_key_cb,
- const AddTextTrackCB& add_text_track_cb,
const LogCB& log_cb)
: state_(WAITING_FOR_INIT),
cancel_next_seek_(false),
host_(NULL),
open_cb_(open_cb),
need_key_cb_(need_key_cb),
- add_text_track_cb_(add_text_track_cb),
+ enable_text_(false),
log_cb_(log_cb),
duration_(kNoTimestamp()),
user_specified_duration_(-1) {
@@ -758,7 +927,10 @@ ChunkDemuxer::ChunkDemuxer(const base::Closure& open_cb,
DCHECK(!need_key_cb_.is_null());
}
-void ChunkDemuxer::Initialize(DemuxerHost* host, const PipelineStatusCB& cb) {
+void ChunkDemuxer::Initialize(
+ DemuxerHost* host,
+ const PipelineStatusCB& cb,
+ bool enable_text_tracks) {
DVLOG(1) << "Init()";
base::AutoLock auto_lock(lock_);
@@ -770,6 +942,7 @@ void ChunkDemuxer::Initialize(DemuxerHost* host, const PipelineStatusCB& cb) {
}
DCHECK_EQ(state_, WAITING_FOR_INIT);
host_ = host;
+ enable_text_ = enable_text_tracks;
ChangeState_Locked(INITIALIZING);
@@ -820,6 +993,7 @@ void ChunkDemuxer::OnAudioRendererDisabled() {
// Demuxer implementation.
DemuxerStream* ChunkDemuxer::GetStream(DemuxerStream::Type type) {
+ DCHECK_NE(type, DemuxerStream::TEXT);
base::AutoLock auto_lock(lock_);
if (type == DemuxerStream::VIDEO)
return video_.get();
@@ -905,13 +1079,19 @@ ChunkDemuxer::Status ChunkDemuxer::AddId(const std::string& id,
base::Bind(&ChunkDemuxer::IncreaseDurationIfNecessary,
base::Unretained(this))));
+ SourceState::NewTextTrackCB new_text_track_cb;
+
+ if (enable_text_) {
+ new_text_track_cb = base::Bind(&ChunkDemuxer::OnNewTextTrack,
+ base::Unretained(this));
+ }
+
source_state->Init(
base::Bind(&ChunkDemuxer::OnSourceInitDone, base::Unretained(this)),
has_audio,
has_video,
- base::Bind(&ChunkDemuxer::OnTextBuffers, base::Unretained(this)),
need_key_cb_,
- add_text_track_cb_);
+ new_text_track_cb);
source_state_map_[id] = source_state.release();
return kOk;
@@ -924,17 +1104,11 @@ void ChunkDemuxer::RemoveId(const std::string& id) {
delete source_state_map_[id];
source_state_map_.erase(id);
- if (source_id_audio_ == id) {
- if (audio_)
- audio_->Shutdown();
+ if (source_id_audio_ == id)
source_id_audio_.clear();
- }
- if (source_id_video_ == id) {
- if (video_)
- video_->Shutdown();
+ if (source_id_video_ == id)
source_id_video_.clear();
- }
}
Ranges<TimeDelta> ChunkDemuxer::GetBufferedRanges(const std::string& id) const {
@@ -1044,7 +1218,7 @@ void ChunkDemuxer::AppendData(const std::string& id,
base::ResetAndReturn(&seek_cb_).Run(PIPELINE_OK);
}
- ranges = GetBufferedRanges();
+ ranges = GetBufferedRanges_Locked();
}
for (size_t i = 0; i < ranges.size(); ++i)
@@ -1341,6 +1515,10 @@ ChunkDemuxer::CreateDemuxerStream(DemuxerStream::Type type) {
video_.reset(new ChunkDemuxerStream(DemuxerStream::VIDEO));
return video_.get();
break;
+ case DemuxerStream::TEXT: {
+ return new ChunkDemuxerStream(DemuxerStream::TEXT);
+ break;
+ }
case DemuxerStream::UNKNOWN:
case DemuxerStream::NUM_TYPES:
NOTREACHED();
@@ -1350,30 +1528,11 @@ ChunkDemuxer::CreateDemuxerStream(DemuxerStream::Type type) {
return NULL;
}
-bool ChunkDemuxer::OnTextBuffers(
- TextTrack* text_track,
- const StreamParser::BufferQueue& buffers) {
+void ChunkDemuxer::OnNewTextTrack(ChunkDemuxerStream* text_stream,
+ const TextTrackConfig& config) {
lock_.AssertAcquired();
DCHECK_NE(state_, SHUTDOWN);
-
- // TODO(matthewjheaney): IncreaseDurationIfNecessary
-
- for (StreamParser::BufferQueue::const_iterator itr = buffers.begin();
- itr != buffers.end(); ++itr) {
- const StreamParserBuffer* const buffer = itr->get();
- const TimeDelta start = buffer->timestamp();
- const TimeDelta end = start + buffer->duration();
-
- std::string id, settings, content;
-
- WebMWebVTTParser::Parse(buffer->data(),
- buffer->data_size(),
- &id, &settings, &content);
-
- text_track->addWebVTTCue(start, end, id, content, settings);
- }
-
- return true;
+ host_->AddTextStream(text_stream, config);
}
bool ChunkDemuxer::IsValidId(const std::string& source_id) const {
@@ -1404,7 +1563,8 @@ void ChunkDemuxer::IncreaseDurationIfNecessary(
}
void ChunkDemuxer::DecreaseDurationIfNecessary() {
- Ranges<TimeDelta> ranges = GetBufferedRanges();
+ lock_.AssertAcquired();
+ Ranges<TimeDelta> ranges = GetBufferedRanges_Locked();
if (ranges.size() == 0u)
return;
@@ -1414,6 +1574,12 @@ void ChunkDemuxer::DecreaseDurationIfNecessary() {
}
Ranges<TimeDelta> ChunkDemuxer::GetBufferedRanges() const {
+ base::AutoLock auto_lock(lock_);
+ return GetBufferedRanges_Locked();
+}
+
+Ranges<TimeDelta> ChunkDemuxer::GetBufferedRanges_Locked() const {
+ lock_.AssertAcquired();
if (audio_ && !video_)
return audio_->GetBufferedRanges(duration_);
else if (!audio_ && video_)
@@ -1422,35 +1588,31 @@ Ranges<TimeDelta> ChunkDemuxer::GetBufferedRanges() const {
}
void ChunkDemuxer::StartReturningData() {
- if (audio_)
- audio_->StartReturningData();
-
- if (video_)
- video_->StartReturningData();
+ for (SourceStateMap::iterator itr = source_state_map_.begin();
+ itr != source_state_map_.end(); ++itr) {
+ itr->second->StartReturningData();
+ }
}
void ChunkDemuxer::AbortPendingReads() {
- if (audio_)
- audio_->AbortReads();
-
- if (video_)
- video_->AbortReads();
+ for (SourceStateMap::iterator itr = source_state_map_.begin();
+ itr != source_state_map_.end(); ++itr) {
+ itr->second->AbortReads();
+ }
}
void ChunkDemuxer::SeekAllSources(TimeDelta seek_time) {
- if (audio_)
- audio_->Seek(seek_time);
-
- if (video_)
- video_->Seek(seek_time);
+ for (SourceStateMap::iterator itr = source_state_map_.begin();
+ itr != source_state_map_.end(); ++itr) {
+ itr->second->Seek(seek_time);
+ }
}
void ChunkDemuxer::CompletePendingReadsIfPossible() {
- if (audio_)
- audio_->CompletePendingReadIfPossible();
-
- if (video_)
- video_->CompletePendingReadIfPossible();
+ for (SourceStateMap::iterator itr = source_state_map_.begin();
+ itr != source_state_map_.end(); ++itr) {
+ itr->second->CompletePendingReadIfPossible();
+ }
}
} // namespace media
diff --git a/chromium/media/filters/chunk_demuxer.h b/chromium/media/filters/chunk_demuxer.h
index e7f6caed37c..51739dbeeca 100644
--- a/chromium/media/filters/chunk_demuxer.h
+++ b/chromium/media/filters/chunk_demuxer.h
@@ -15,7 +15,6 @@
#include "media/base/demuxer.h"
#include "media/base/ranges.h"
#include "media/base/stream_parser.h"
-#include "media/base/text_track.h"
#include "media/filters/source_buffer_stream.h"
namespace media {
@@ -38,19 +37,19 @@ class MEDIA_EXPORT ChunkDemuxer : public Demuxer {
// is ready to receive media data via AppenData().
// |need_key_cb| Run when the demuxer determines that an encryption key is
// needed to decrypt the content.
- // |add_text_track_cb| Run when demuxer detects the presence of an inband
- // text track.
+ // |enable_text| Process inband text tracks in the normal way when true,
+ // otherwise ignore them.
// |log_cb| Run when parsing error messages need to be logged to the error
// console.
ChunkDemuxer(const base::Closure& open_cb,
const NeedKeyCB& need_key_cb,
- const AddTextTrackCB& add_text_track_cb,
const LogCB& log_cb);
virtual ~ChunkDemuxer();
// Demuxer implementation.
virtual void Initialize(DemuxerHost* host,
- const PipelineStatusCB& cb) OVERRIDE;
+ const PipelineStatusCB& cb,
+ bool enable_text_tracks) OVERRIDE;
virtual void Stop(const base::Closure& callback) OVERRIDE;
virtual void Seek(base::TimeDelta time, const PipelineStatusCB& cb) OVERRIDE;
virtual void OnAudioRendererDisabled() OVERRIDE;
@@ -140,6 +139,12 @@ class MEDIA_EXPORT ChunkDemuxer : public Demuxer {
void SetMemoryLimitsForTesting(int memory_limit);
+ // Returns the ranges representing the buffered data in the demuxer.
+ // TODO(wolenetz): Remove this method once MediaSourceDelegate no longer
+ // requires it for doing hack browser seeks to I-frame on Android. See
+ // http://crbug.com/304234.
+ Ranges<base::TimeDelta> GetBufferedRanges() const;
+
private:
enum State {
WAITING_FOR_INIT,
@@ -171,8 +176,8 @@ class MEDIA_EXPORT ChunkDemuxer : public Demuxer {
// has not been created before. Returns NULL otherwise.
ChunkDemuxerStream* CreateDemuxerStream(DemuxerStream::Type type);
- bool OnTextBuffers(TextTrack* text_track,
- const StreamParser::BufferQueue& buffers);
+ void OnNewTextTrack(ChunkDemuxerStream* text_stream,
+ const TextTrackConfig& config);
void OnNewMediaSegment(const std::string& source_id,
base::TimeDelta start_timestamp);
@@ -203,7 +208,7 @@ class MEDIA_EXPORT ChunkDemuxer : public Demuxer {
void UpdateDuration(base::TimeDelta new_duration);
// Returns the ranges representing the buffered data in the demuxer.
- Ranges<base::TimeDelta> GetBufferedRanges() const;
+ Ranges<base::TimeDelta> GetBufferedRanges_Locked() const;
// Start returning data on all DemuxerStreams.
void StartReturningData();
@@ -224,12 +229,16 @@ class MEDIA_EXPORT ChunkDemuxer : public Demuxer {
DemuxerHost* host_;
base::Closure open_cb_;
NeedKeyCB need_key_cb_;
- AddTextTrackCB add_text_track_cb_;
+ bool enable_text_;
// Callback used to report error strings that can help the web developer
// figure out what is wrong with the content.
LogCB log_cb_;
PipelineStatusCB init_cb_;
+ // Callback to execute upon seek completion.
+ // TODO(wolenetz/acolwell): Protect against possible double-locking by first
+ // releasing |lock_| before executing this callback. See
+ // http://crbug.com/308226
PipelineStatusCB seek_cb_;
scoped_ptr<ChunkDemuxerStream> audio_;
diff --git a/chromium/media/filters/chunk_demuxer_unittest.cc b/chromium/media/filters/chunk_demuxer_unittest.cc
index 3d9b26f681c..87c9f7074b6 100644
--- a/chromium/media/filters/chunk_demuxer_unittest.cc
+++ b/chromium/media/filters/chunk_demuxer_unittest.cc
@@ -159,17 +159,14 @@ class ChunkDemuxerTest : public testing::Test {
base::Bind(&ChunkDemuxerTest::DemuxerOpened, base::Unretained(this));
Demuxer::NeedKeyCB need_key_cb =
base::Bind(&ChunkDemuxerTest::DemuxerNeedKey, base::Unretained(this));
- AddTextTrackCB add_text_track_cb =
- base::Bind(&ChunkDemuxerTest::OnTextTrack, base::Unretained(this));
- demuxer_.reset(new ChunkDemuxer(open_cb, need_key_cb,
- add_text_track_cb, LogCB()));
+ demuxer_.reset(new ChunkDemuxer(open_cb, need_key_cb, LogCB()));
}
virtual ~ChunkDemuxerTest() {
ShutdownDemuxer();
}
- void CreateInitSegment(bool has_audio, bool has_video,
+ void CreateInitSegment(bool has_audio, bool has_video, bool has_text,
bool is_audio_encrypted, bool is_video_encrypted,
scoped_ptr<uint8[]>* buffer,
int* size) {
@@ -179,6 +176,7 @@ class ChunkDemuxerTest : public testing::Test {
scoped_refptr<DecoderBuffer> video_track_entry;
scoped_refptr<DecoderBuffer> audio_content_encodings;
scoped_refptr<DecoderBuffer> video_content_encodings;
+ scoped_refptr<DecoderBuffer> text_track_entry;
ebml_header = ReadTestDataFile("webm_ebml_element");
@@ -204,6 +202,27 @@ class ChunkDemuxerTest : public testing::Test {
}
}
+ if (has_text) {
+ // TODO(matthewjheaney): create an abstraction to do
+ // this (http://crbug/321454).
+ // We need it to also handle the creation of multiple text tracks.
+ //
+ // This is the track entry for a text track,
+ // TrackEntry [AE], size=30
+ // TrackNum [D7], size=1, val=3
+ // TrackUID [73] [C5], size=1, value=3
+ // TrackType [83], size=1, val=0x11
+ // CodecId [86], size=18, val="D_WEBVTT/SUBTITLES"
+ const char str[] = "\xAE\x9E\xD7\x81\x03\x73\xC5\x81\x03"
+ "\x83\x81\x11\x86\x92"
+ "D_WEBVTT/SUBTITLES";
+ const int len = strlen(str);
+ DCHECK_EQ(len, 32);
+ const uint8* const buf = reinterpret_cast<const uint8*>(str);
+ text_track_entry = DecoderBuffer::CopyFrom(buf, len);
+ tracks_element_size += text_track_entry->data_size();
+ }
+
*size = ebml_header->data_size() + info->data_size() +
kTracksHeaderSize + tracks_element_size;
@@ -253,6 +272,12 @@ class ChunkDemuxerTest : public testing::Test {
}
buf += video_track_entry->data_size();
}
+
+ if (has_text) {
+ memcpy(buf, text_track_entry->data(),
+ text_track_entry->data_size());
+ buf += text_track_entry->data_size();
+ }
}
ChunkDemuxer::Status AddId() {
@@ -365,22 +390,28 @@ class ChunkDemuxerTest : public testing::Test {
}
void AppendInitSegment(bool has_audio, bool has_video) {
- AppendInitSegmentWithSourceId(kSourceId, has_audio, has_video);
+ AppendInitSegmentWithSourceId(kSourceId, has_audio, has_video, false);
+ }
+
+ void AppendInitSegmentText(bool has_audio, bool has_video) {
+ AppendInitSegmentWithSourceId(kSourceId, has_audio, has_video, true);
}
void AppendInitSegmentWithSourceId(const std::string& source_id,
- bool has_audio, bool has_video) {
+ bool has_audio, bool has_video,
+ bool has_text) {
AppendInitSegmentWithEncryptedInfo(
- source_id, has_audio, has_video, false, false);
+ source_id, has_audio, has_video, has_text, false, false);
}
void AppendInitSegmentWithEncryptedInfo(const std::string& source_id,
bool has_audio, bool has_video,
+ bool has_text,
bool is_audio_encrypted,
bool is_video_encrypted) {
scoped_ptr<uint8[]> info_tracks;
int info_tracks_size = 0;
- CreateInitSegment(has_audio, has_video,
+ CreateInitSegment(has_audio, has_video, has_text,
is_audio_encrypted, is_video_encrypted,
&info_tracks, &info_tracks_size);
AppendData(source_id, info_tracks.get(), info_tracks_size);
@@ -418,11 +449,17 @@ class ChunkDemuxerTest : public testing::Test {
}
bool InitDemuxer(bool has_audio, bool has_video) {
- return InitDemuxerWithEncryptionInfo(has_audio, has_video, false, false);
+ return InitDemuxerWithEncryptionInfo(has_audio, has_video, false,
+ false, false);
+ }
+
+ bool InitDemuxerText(bool has_audio, bool has_video) {
+ return InitDemuxerWithEncryptionInfo(has_audio, has_video, true,
+ false, false);
}
bool InitDemuxerWithEncryptionInfo(
- bool has_audio, bool has_video,
+ bool has_audio, bool has_video, bool has_text,
bool is_audio_encrypted, bool is_video_encrypted) {
PipelineStatus expected_status =
(has_audio || has_video) ? PIPELINE_OK : DEMUXER_ERROR_COULD_NOT_OPEN;
@@ -433,33 +470,39 @@ class ChunkDemuxerTest : public testing::Test {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
- &host_, CreateInitDoneCB(expected_duration, expected_status));
+ &host_, CreateInitDoneCB(expected_duration, expected_status), true);
if (AddId(kSourceId, has_audio, has_video) != ChunkDemuxer::kOk)
return false;
AppendInitSegmentWithEncryptedInfo(
- kSourceId, has_audio, has_video,
+ kSourceId, has_audio, has_video, has_text,
is_audio_encrypted, is_video_encrypted);
return true;
}
- bool InitDemuxerAudioAndVideoSources(const std::string& audio_id,
- const std::string& video_id) {
+ bool InitDemuxerAudioAndVideoSourcesText(const std::string& audio_id,
+ const std::string& video_id,
+ bool has_text) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
- &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK));
+ &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
if (AddId(audio_id, true, false) != ChunkDemuxer::kOk)
return false;
if (AddId(video_id, false, true) != ChunkDemuxer::kOk)
return false;
- AppendInitSegmentWithSourceId(audio_id, true, false);
- AppendInitSegmentWithSourceId(video_id, false, true);
+ AppendInitSegmentWithSourceId(audio_id, true, false, has_text);
+ AppendInitSegmentWithSourceId(video_id, false, true, has_text);
return true;
}
+ bool InitDemuxerAudioAndVideoSources(const std::string& audio_id,
+ const std::string& video_id) {
+ return InitDemuxerAudioAndVideoSourcesText(audio_id, video_id, false);
+ }
+
// Initializes the demuxer with data from 2 files with different
// decoder configurations. This is used to test the decoder config change
// logic.
@@ -484,7 +527,7 @@ class ChunkDemuxerTest : public testing::Test {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
&host_, CreateInitDoneCB(base::TimeDelta::FromMilliseconds(2744),
- PIPELINE_OK));
+ PIPELINE_OK), true);
if (AddId(kSourceId, true, true) != ChunkDemuxer::kOk)
return false;
@@ -810,7 +853,7 @@ class ChunkDemuxerTest : public testing::Test {
bool has_audio, bool has_video) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
- &host_, CreateInitDoneCB(duration, PIPELINE_OK));
+ &host_, CreateInitDoneCB(duration, PIPELINE_OK), true);
if (AddId(kSourceId, has_audio, has_video) != ChunkDemuxer::kOk)
return false;
@@ -861,12 +904,6 @@ class ChunkDemuxerTest : public testing::Test {
NeedKeyMock(type, init_data_ptr, init_data.size());
}
- scoped_ptr<TextTrack> OnTextTrack(TextKind kind,
- const std::string& label,
- const std::string& language) {
- return scoped_ptr<TextTrack>();
- }
-
void Seek(base::TimeDelta seek_time) {
demuxer_->StartWaitingForSeek(seek_time);
demuxer_->Seek(seek_time, NewExpectedStatusCB(PIPELINE_OK));
@@ -913,7 +950,62 @@ TEST_F(ChunkDemuxerTest, Init) {
}
ASSERT_TRUE(InitDemuxerWithEncryptionInfo(
- has_audio, has_video, is_audio_encrypted, is_video_encrypted));
+ has_audio, has_video, false, is_audio_encrypted, is_video_encrypted));
+
+ DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
+ if (has_audio) {
+ ASSERT_TRUE(audio_stream);
+
+ const AudioDecoderConfig& config = audio_stream->audio_decoder_config();
+ EXPECT_EQ(kCodecVorbis, config.codec());
+ EXPECT_EQ(32, config.bits_per_channel());
+ EXPECT_EQ(CHANNEL_LAYOUT_STEREO, config.channel_layout());
+ EXPECT_EQ(44100, config.samples_per_second());
+ EXPECT_TRUE(config.extra_data());
+ EXPECT_GT(config.extra_data_size(), 0u);
+ EXPECT_EQ(kSampleFormatPlanarF32, config.sample_format());
+ EXPECT_EQ(is_audio_encrypted,
+ audio_stream->audio_decoder_config().is_encrypted());
+ } else {
+ EXPECT_FALSE(audio_stream);
+ }
+
+ DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
+ if (has_video) {
+ EXPECT_TRUE(video_stream);
+ EXPECT_EQ(is_video_encrypted,
+ video_stream->video_decoder_config().is_encrypted());
+ } else {
+ EXPECT_FALSE(video_stream);
+ }
+
+ ShutdownDemuxer();
+ demuxer_.reset();
+ }
+}
+
+TEST_F(ChunkDemuxerTest, InitText) {
+ // Test with 1 video stream and 1 text streams, and 0 or 1 audio streams.
+ // No encryption cases handled here.
+ bool has_video = true;
+ bool is_audio_encrypted = false;
+ bool is_video_encrypted = false;
+ for (int i = 0; i < 2; i++) {
+ bool has_audio = (i & 0x1) != 0;
+
+ CreateNewDemuxer();
+
+ DemuxerStream* text_stream = NULL;
+ TextTrackConfig text_config;
+ EXPECT_CALL(host_, AddTextStream(_,_))
+ .WillOnce(DoAll(SaveArg<0>(&text_stream),
+ SaveArg<1>(&text_config)));
+
+ ASSERT_TRUE(InitDemuxerWithEncryptionInfo(
+ has_audio, has_video, true, is_audio_encrypted, is_video_encrypted));
+ ASSERT_TRUE(text_stream);
+ EXPECT_EQ(DemuxerStream::TEXT, text_stream->type());
+ EXPECT_EQ(kTextSubtitles, text_config.kind());
DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
if (has_audio) {
@@ -953,12 +1045,27 @@ TEST_F(ChunkDemuxerTest, ShutdownBeforeAllInitSegmentsAppended) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
&host_, CreateInitDoneCB(
- kDefaultDuration(), DEMUXER_ERROR_COULD_NOT_OPEN));
+ kDefaultDuration(), DEMUXER_ERROR_COULD_NOT_OPEN), true);
EXPECT_EQ(AddId("audio", true, false), ChunkDemuxer::kOk);
EXPECT_EQ(AddId("video", false, true), ChunkDemuxer::kOk);
- AppendInitSegmentWithSourceId("audio", true, false);
+ AppendInitSegmentWithSourceId("audio", true, false, false);
+}
+
+TEST_F(ChunkDemuxerTest, ShutdownBeforeAllInitSegmentsAppendedText) {
+ EXPECT_CALL(*this, DemuxerOpened());
+ demuxer_->Initialize(
+ &host_, CreateInitDoneCB(
+ kDefaultDuration(), DEMUXER_ERROR_COULD_NOT_OPEN), true);
+
+ EXPECT_EQ(AddId("audio", true, false), ChunkDemuxer::kOk);
+ EXPECT_EQ(AddId("video", false, true), ChunkDemuxer::kOk);
+
+ EXPECT_CALL(host_, AddTextStream(_,_))
+ .Times(Exactly(1));
+
+ AppendInitSegmentWithSourceId("video", false, true, true);
}
// Test that Seek() completes successfully when the first cluster
@@ -1033,7 +1140,8 @@ TEST_F(ChunkDemuxerTest, SeekWhileParsingCluster) {
TEST_F(ChunkDemuxerTest, AppendDataBeforeInit) {
scoped_ptr<uint8[]> info_tracks;
int info_tracks_size = 0;
- CreateInitSegment(true, true, false, false, &info_tracks, &info_tracks_size);
+ CreateInitSegment(true, true, false,
+ false, false, &info_tracks, &info_tracks_size);
demuxer_->AppendData(kSourceId, info_tracks.get(), info_tracks_size);
}
@@ -1139,7 +1247,7 @@ TEST_F(ChunkDemuxerTest, PerStreamMonotonicallyIncreasingTimestamps) {
TEST_F(ChunkDemuxerTest, ClusterBeforeInitSegment) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
- &host_, NewExpectedStatusCB(DEMUXER_ERROR_COULD_NOT_OPEN));
+ &host_, NewExpectedStatusCB(DEMUXER_ERROR_COULD_NOT_OPEN), true);
ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
@@ -1150,14 +1258,14 @@ TEST_F(ChunkDemuxerTest, ClusterBeforeInitSegment) {
TEST_F(ChunkDemuxerTest, EOSDuringInit) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
- &host_, NewExpectedStatusCB(DEMUXER_ERROR_COULD_NOT_OPEN));
+ &host_, NewExpectedStatusCB(DEMUXER_ERROR_COULD_NOT_OPEN), true);
MarkEndOfStream(PIPELINE_OK);
}
TEST_F(ChunkDemuxerTest, EndOfStreamWithNoAppend) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
- &host_, NewExpectedStatusCB(DEMUXER_ERROR_COULD_NOT_OPEN));
+ &host_, NewExpectedStatusCB(DEMUXER_ERROR_COULD_NOT_OPEN), true);
ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
@@ -1356,13 +1464,14 @@ TEST_F(ChunkDemuxerTest, EndOfStreamDuringCanceledSeek) {
TEST_F(ChunkDemuxerTest, AppendingInPieces) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
- &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK));
+ &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
scoped_ptr<uint8[]> info_tracks;
int info_tracks_size = 0;
- CreateInitSegment(true, true, false, false, &info_tracks, &info_tracks_size);
+ CreateInitSegment(true, true, false,
+ false, false, &info_tracks, &info_tracks_size);
scoped_ptr<Cluster> cluster_a(kDefaultFirstCluster());
scoped_ptr<Cluster> cluster_b(kDefaultSecondCluster());
@@ -1524,7 +1633,7 @@ TEST_F(ChunkDemuxerTest, ParseErrorDuringInit) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
&host_, CreateInitDoneCB(
- kNoTimestamp(), DEMUXER_ERROR_COULD_NOT_OPEN));
+ kNoTimestamp(), DEMUXER_ERROR_COULD_NOT_OPEN), true);
ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
@@ -1536,7 +1645,7 @@ TEST_F(ChunkDemuxerTest, AVHeadersWithAudioOnlyType) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
&host_, CreateInitDoneCB(kNoTimestamp(),
- DEMUXER_ERROR_COULD_NOT_OPEN));
+ DEMUXER_ERROR_COULD_NOT_OPEN), true);
std::vector<std::string> codecs(1);
codecs[0] = "vorbis";
@@ -1550,7 +1659,7 @@ TEST_F(ChunkDemuxerTest, AVHeadersWithVideoOnlyType) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
&host_, CreateInitDoneCB(kNoTimestamp(),
- DEMUXER_ERROR_COULD_NOT_OPEN));
+ DEMUXER_ERROR_COULD_NOT_OPEN), true);
std::vector<std::string> codecs(1);
codecs[0] = "vp8";
@@ -1587,10 +1696,30 @@ TEST_F(ChunkDemuxerTest, AddSeparateSourcesForAudioAndVideo) {
GenerateVideoStreamExpectedReads(0, 4);
}
+TEST_F(ChunkDemuxerTest, AddSeparateSourcesForAudioAndVideoText) {
+ // TODO(matthewjheaney): Here and elsewhere, we need more tests
+ // for inband text tracks (http://crbug/321455).
+
+ std::string audio_id = "audio1";
+ std::string video_id = "video1";
+
+ EXPECT_CALL(host_, AddTextStream(_,_))
+ .Times(Exactly(2));
+ ASSERT_TRUE(InitDemuxerAudioAndVideoSourcesText(audio_id, video_id, true));
+
+ // Append audio and video data into separate source ids.
+ AppendCluster(audio_id,
+ GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
+ GenerateAudioStreamExpectedReads(0, 4);
+ AppendCluster(video_id,
+ GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
+ GenerateVideoStreamExpectedReads(0, 4);
+}
+
TEST_F(ChunkDemuxerTest, AddIdFailures) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
- &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK));
+ &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
std::string audio_id = "audio1";
std::string video_id = "video1";
@@ -1600,7 +1729,7 @@ TEST_F(ChunkDemuxerTest, AddIdFailures) {
// Adding an id with audio/video should fail because we already added audio.
ASSERT_EQ(AddId(), ChunkDemuxer::kReachedIdLimit);
- AppendInitSegmentWithSourceId(audio_id, true, false);
+ AppendInitSegmentWithSourceId(audio_id, true, false, false);
// Adding an id after append should fail.
ASSERT_EQ(AddId(video_id, false, true), ChunkDemuxer::kReachedIdLimit);
@@ -1829,7 +1958,7 @@ TEST_F(ChunkDemuxerTest, EndOfStreamDuringPendingSeek) {
TEST_F(ChunkDemuxerTest, GetBufferedRanges_AudioIdOnly) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
- &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK));
+ &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
ASSERT_EQ(AddId(kSourceId, true, false), ChunkDemuxer::kOk);
AppendInitSegment(true, false);
@@ -1851,7 +1980,7 @@ TEST_F(ChunkDemuxerTest, GetBufferedRanges_AudioIdOnly) {
TEST_F(ChunkDemuxerTest, GetBufferedRanges_VideoIdOnly) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
- &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK));
+ &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
ASSERT_EQ(AddId(kSourceId, false, true), ChunkDemuxer::kOk);
AppendInitSegment(false, true);
@@ -2093,7 +2222,7 @@ TEST_F(ChunkDemuxerTest, EndOfStreamStillSetAfterSeek) {
TEST_F(ChunkDemuxerTest, GetBufferedRangesBeforeInitSegment) {
EXPECT_CALL(*this, DemuxerOpened());
- demuxer_->Initialize(&host_, CreateInitDoneCB(PIPELINE_OK));
+ demuxer_->Initialize(&host_, CreateInitDoneCB(PIPELINE_OK), true);
ASSERT_EQ(AddId("audio", true, false), ChunkDemuxer::kOk);
ASSERT_EQ(AddId("video", false, true), ChunkDemuxer::kOk);
@@ -2417,7 +2546,7 @@ TEST_F(ChunkDemuxerTest, AppendAfterEndOfStream) {
TEST_F(ChunkDemuxerTest, ShutdownBeforeInitialize) {
demuxer_->Shutdown();
demuxer_->Initialize(
- &host_, CreateInitDoneCB(DEMUXER_ERROR_COULD_NOT_OPEN));
+ &host_, CreateInitDoneCB(DEMUXER_ERROR_COULD_NOT_OPEN), true);
message_loop_.RunUntilIdle();
}
@@ -2566,7 +2695,7 @@ TEST_F(ChunkDemuxerTest, GCDuringSeek) {
TEST_F(ChunkDemuxerTest, RemoveBeforeInitSegment) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
- &host_, CreateInitDoneCB(kNoTimestamp(), PIPELINE_OK));
+ &host_, CreateInitDoneCB(kNoTimestamp(), PIPELINE_OK), true);
EXPECT_EQ(ChunkDemuxer::kOk, AddId(kSourceId, true, true));
diff --git a/chromium/media/filters/decrypting_demuxer_stream.cc b/chromium/media/filters/decrypting_demuxer_stream.cc
index 55021489ba6..a26498cda55 100644
--- a/chromium/media/filters/decrypting_demuxer_stream.cc
+++ b/chromium/media/filters/decrypting_demuxer_stream.cc
@@ -78,10 +78,13 @@ void DecryptingDemuxerStream::Reset(const base::Closure& closure) {
DVLOG(2) << __FUNCTION__ << " - state: " << state_;
DCHECK(message_loop_->BelongsToCurrentThread());
DCHECK(state_ != kUninitialized) << state_;
+ DCHECK(state_ != kStopped) << state_;
DCHECK(reset_cb_.is_null());
reset_cb_ = BindToCurrentLoop(closure);
+ // TODO(xhwang): This should not happen. Remove it, DCHECK against the
+ // condition and clean up related tests.
if (state_ == kDecryptorRequested) {
DCHECK(!init_cb_.is_null());
set_decryptor_ready_cb_.Run(DecryptorReadyCB());
@@ -111,6 +114,38 @@ void DecryptingDemuxerStream::Reset(const base::Closure& closure) {
DoReset();
}
+void DecryptingDemuxerStream::Stop(const base::Closure& closure) {
+ DVLOG(2) << __FUNCTION__ << " - state: " << state_;
+ DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(state_ != kUninitialized) << state_;
+
+ // Invalidate all weak pointers so that pending callbacks won't fire.
+ weak_factory_.InvalidateWeakPtrs();
+
+ // At this point the render thread is likely paused (in WebMediaPlayerImpl's
+ // Destroy()), so running |closure| can't wait for anything that requires the
+ // render thread to process messages to complete (such as PPAPI methods).
+ if (decryptor_) {
+ // Clear the callback.
+ decryptor_->RegisterNewKeyCB(GetDecryptorStreamType(),
+ Decryptor::NewKeyCB());
+ decryptor_->CancelDecrypt(GetDecryptorStreamType());
+ decryptor_ = NULL;
+ }
+ if (!set_decryptor_ready_cb_.is_null())
+ base::ResetAndReturn(&set_decryptor_ready_cb_).Run(DecryptorReadyCB());
+ if (!init_cb_.is_null())
+ base::ResetAndReturn(&init_cb_).Run(PIPELINE_ERROR_ABORT);
+ if (!read_cb_.is_null())
+ base::ResetAndReturn(&read_cb_).Run(kAborted, NULL);
+ if (!reset_cb_.is_null())
+ base::ResetAndReturn(&reset_cb_).Run();
+ pending_buffer_to_decrypt_ = NULL;
+
+ state_ = kStopped;
+ BindToCurrentLoop(closure).Run();
+}
+
AudioDecoderConfig DecryptingDemuxerStream::audio_decoder_config() {
DCHECK(state_ != kUninitialized && state_ != kDecryptorRequested) << state_;
CHECK_EQ(demuxer_stream_->type(), AUDIO);
@@ -170,6 +205,23 @@ void DecryptingDemuxerStream::DecryptBuffer(
DCHECK(!read_cb_.is_null());
DCHECK_EQ(buffer.get() != NULL, status == kOk) << status;
+ // Even when |!reset_cb_.is_null()|, we need to pass |kConfigChanged| back to
+ // the caller so that the downstream decoder can be properly reinitialized.
+ if (status == kConfigChanged) {
+ DVLOG(2) << "DoDecryptBuffer() - kConfigChanged.";
+ DCHECK_EQ(demuxer_stream_->type() == AUDIO, audio_config_.IsValidConfig());
+ DCHECK_EQ(demuxer_stream_->type() == VIDEO, video_config_.IsValidConfig());
+
+ // Update the decoder config, which the decoder will use when it is notified
+ // of kConfigChanged.
+ InitializeDecoderConfig();
+ state_ = kIdle;
+ base::ResetAndReturn(&read_cb_).Run(kConfigChanged, NULL);
+ if (!reset_cb_.is_null())
+ DoReset();
+ return;
+ }
+
if (!reset_cb_.is_null()) {
base::ResetAndReturn(&read_cb_).Run(kAborted, NULL);
DoReset();
@@ -183,23 +235,24 @@ void DecryptingDemuxerStream::DecryptBuffer(
return;
}
- if (status == kConfigChanged) {
- DVLOG(2) << "DoDecryptBuffer() - kConfigChanged.";
- DCHECK_EQ(demuxer_stream_->type() == AUDIO, audio_config_.IsValidConfig());
- DCHECK_EQ(demuxer_stream_->type() == VIDEO, video_config_.IsValidConfig());
-
- // Update the decoder config, which the decoder will use when it is notified
- // of kConfigChanged.
- InitializeDecoderConfig();
+ if (buffer->end_of_stream()) {
+ DVLOG(2) << "DoDecryptBuffer() - EOS buffer.";
state_ = kIdle;
- base::ResetAndReturn(&read_cb_).Run(kConfigChanged, NULL);
+ base::ResetAndReturn(&read_cb_).Run(status, buffer);
return;
}
- if (buffer->end_of_stream()) {
- DVLOG(2) << "DoDecryptBuffer() - EOS buffer.";
+ DCHECK(buffer->decrypt_config());
+ // An empty iv string signals that the frame is unencrypted.
+ if (buffer->decrypt_config()->iv().empty()) {
+ DVLOG(2) << "DoDecryptBuffer() - clear buffer.";
+ int data_offset = buffer->decrypt_config()->data_offset();
+ scoped_refptr<DecoderBuffer> decrypted = DecoderBuffer::CopyFrom(
+ buffer->data() + data_offset, buffer->data_size() - data_offset);
+ decrypted->set_timestamp(buffer->timestamp());
+ decrypted->set_duration(buffer->duration());
state_ = kIdle;
- base::ResetAndReturn(&read_cb_).Run(status, buffer);
+ base::ResetAndReturn(&read_cb_).Run(kOk, decrypted);
return;
}
diff --git a/chromium/media/filters/decrypting_demuxer_stream.h b/chromium/media/filters/decrypting_demuxer_stream.h
index cc34c04e27e..394cb5b7fc9 100644
--- a/chromium/media/filters/decrypting_demuxer_stream.h
+++ b/chromium/media/filters/decrypting_demuxer_stream.h
@@ -36,11 +36,19 @@ class MEDIA_EXPORT DecryptingDemuxerStream : public DemuxerStream {
void Initialize(DemuxerStream* stream,
const PipelineStatusCB& status_cb);
- // Cancels all pending operations and fires all pending callbacks. Sets
- // |this| to kUninitialized state if |this| hasn't been initialized, or to
- // kIdle state otherwise.
+ // Cancels all pending operations and fires all pending callbacks. If in
+ // kPendingDemuxerRead or kPendingDecrypt state, waits for the pending
+ // operation to finish before satisfying |closure|. Sets the state to
+ // kUninitialized if |this| hasn't been initialized, or to kIdle otherwise.
void Reset(const base::Closure& closure);
+ // Cancels all pending operations immediately and fires all pending callbacks
+ // and sets the state to kStopped. Does NOT wait for any pending operations.
+ // Note: During the teardown process, media pipeline will be waiting on the
+ // render main thread. If a Decryptor depends on the render main thread
+ // (e.g. PpapiDecryptor), the pending DecryptCB would not be satisfied.
+ void Stop(const base::Closure& closure);
+
// DemuxerStream implementation.
virtual void Read(const ReadCB& read_cb) OVERRIDE;
virtual AudioDecoderConfig audio_decoder_config() OVERRIDE;
@@ -60,6 +68,7 @@ class MEDIA_EXPORT DecryptingDemuxerStream : public DemuxerStream {
kPendingDemuxerRead,
kPendingDecrypt,
kWaitingForKey,
+ kStopped
};
// Callback for DecryptorHost::RequestDecryptor().
diff --git a/chromium/media/filters/decrypting_demuxer_stream_unittest.cc b/chromium/media/filters/decrypting_demuxer_stream_unittest.cc
index 585f3d0eb0a..3e41734aa4e 100644
--- a/chromium/media/filters/decrypting_demuxer_stream_unittest.cc
+++ b/chromium/media/filters/decrypting_demuxer_stream_unittest.cc
@@ -28,15 +28,17 @@ static const int kFakeBufferSize = 16;
static const uint8 kFakeKeyId[] = { 0x4b, 0x65, 0x79, 0x20, 0x49, 0x44 };
static const uint8 kFakeIv[DecryptConfig::kDecryptionKeySize] = { 0 };
-// Create a fake non-empty encrypted buffer.
-static scoped_refptr<DecoderBuffer> CreateFakeEncryptedBuffer() {
+// Create a fake non-empty buffer in an encrypted stream. When |is_clear| is
+// true, the buffer is not encrypted (signaled by an empty IV).
+static scoped_refptr<DecoderBuffer> CreateFakeEncryptedStreamBuffer(
+ bool is_clear) {
scoped_refptr<DecoderBuffer> buffer(new DecoderBuffer(kFakeBufferSize));
+ std::string iv = is_clear ? std::string() :
+ std::string(reinterpret_cast<const char*>(kFakeIv), arraysize(kFakeIv));
buffer->set_decrypt_config(scoped_ptr<DecryptConfig>(new DecryptConfig(
std::string(reinterpret_cast<const char*>(kFakeKeyId),
arraysize(kFakeKeyId)),
- std::string(reinterpret_cast<const char*>(kFakeIv), arraysize(kFakeIv)),
- 0,
- std::vector<SubsampleEntry>())));
+ iv, 0, std::vector<SubsampleEntry>())));
return buffer;
}
@@ -48,9 +50,11 @@ ACTION_P(ReturnBuffer, buffer) {
arg0.Run(buffer.get() ? DemuxerStream::kOk : DemuxerStream::kAborted, buffer);
}
-ACTION_P(RunCallbackIfNotNull, param) {
+ACTION_P2(SetDecryptorIfNotNull, decryptor, is_decryptor_set) {
if (!arg0.is_null())
- arg0.Run(param);
+ arg0.Run(decryptor);
+
+ *is_decryptor_set = !arg0.is_null() && decryptor;
}
ACTION_P2(ResetAndRunCallback, callback, param) {
@@ -72,11 +76,13 @@ class DecryptingDemuxerStreamTest : public testing::Test {
&DecryptingDemuxerStreamTest::RequestDecryptorNotification,
base::Unretained(this)))),
decryptor_(new StrictMock<MockDecryptor>()),
+ is_decryptor_set_(false),
input_audio_stream_(
new StrictMock<MockDemuxerStream>(DemuxerStream::AUDIO)),
input_video_stream_(
new StrictMock<MockDemuxerStream>(DemuxerStream::VIDEO)),
- encrypted_buffer_(CreateFakeEncryptedBuffer()),
+ clear_buffer_(CreateFakeEncryptedStreamBuffer(true)),
+ encrypted_buffer_(CreateFakeEncryptedStreamBuffer(false)),
decrypted_buffer_(new DecoderBuffer(kFakeBufferSize)) {
}
@@ -103,9 +109,9 @@ class DecryptingDemuxerStreamTest : public testing::Test {
void Initialize() {
EXPECT_CALL(*this, RequestDecryptorNotification(_))
- .WillOnce(RunCallbackIfNotNull(decryptor_.get()));
+ .WillOnce(SetDecryptorIfNotNull(decryptor_.get(), &is_decryptor_set_));
EXPECT_CALL(*decryptor_, RegisterNewKeyCB(Decryptor::kAudio, _))
- .WillOnce(SaveArg<1>(&key_added_cb_));
+ .WillRepeatedly(SaveArg<1>(&key_added_cb_));
AudioDecoderConfig input_config(
kCodecVorbis, kSampleFormatPlanarF32, CHANNEL_LAYOUT_STEREO, 44100,
@@ -139,6 +145,23 @@ class DecryptingDemuxerStreamTest : public testing::Test {
message_loop_.RunUntilIdle();
}
+ void EnterClearReadingState() {
+ EXPECT_TRUE(clear_buffer_->decrypt_config());
+ EXPECT_CALL(*input_audio_stream_, Read(_))
+ .WillOnce(ReturnBuffer(clear_buffer_));
+
+  // For clear buffer, decryptor->Decrypt() will not be called.
+
+ scoped_refptr<DecoderBuffer> decrypted_buffer;
+ EXPECT_CALL(*this, BufferReady(DemuxerStream::kOk, _))
+ .WillOnce(SaveArg<1>(&decrypted_buffer));
+ demuxer_stream_->Read(base::Bind(&DecryptingDemuxerStreamTest::BufferReady,
+ base::Unretained(this)));
+ message_loop_.RunUntilIdle();
+
+ EXPECT_FALSE(decrypted_buffer->decrypt_config());
+ }
+
// Sets up expectations and actions to put DecryptingDemuxerStream in an
// active normal reading state.
void EnterNormalReadingState() {
@@ -194,15 +217,32 @@ class DecryptingDemuxerStreamTest : public testing::Test {
}
}
+ void SatisfyPendingDemuxerReadCB(DemuxerStream::Status status) {
+ scoped_refptr<DecoderBuffer> buffer =
+ (status == DemuxerStream::kOk) ? encrypted_buffer_ : NULL;
+ base::ResetAndReturn(&pending_demuxer_read_cb_).Run(status, buffer);
+ }
+
void Reset() {
- EXPECT_CALL(*decryptor_, CancelDecrypt(Decryptor::kAudio))
- .WillRepeatedly(InvokeWithoutArgs(
- this, &DecryptingDemuxerStreamTest::AbortPendingDecryptCB));
+ if (is_decryptor_set_) {
+ EXPECT_CALL(*decryptor_, CancelDecrypt(Decryptor::kAudio))
+ .WillRepeatedly(InvokeWithoutArgs(
+ this, &DecryptingDemuxerStreamTest::AbortPendingDecryptCB));
+ }
demuxer_stream_->Reset(NewExpectedClosure());
message_loop_.RunUntilIdle();
}
+ // Stops the |demuxer_stream_| without satisfying/aborting any pending
+ // operations.
+ void Stop() {
+ if (is_decryptor_set_)
+ EXPECT_CALL(*decryptor_, CancelDecrypt(Decryptor::kAudio));
+ demuxer_stream_->Stop(NewExpectedClosure());
+ message_loop_.RunUntilIdle();
+ }
+
MOCK_METHOD1(RequestDecryptorNotification, void(const DecryptorReadyCB&));
MOCK_METHOD2(BufferReady, void(DemuxerStream::Status,
@@ -211,6 +251,8 @@ class DecryptingDemuxerStreamTest : public testing::Test {
base::MessageLoop message_loop_;
scoped_ptr<DecryptingDemuxerStream> demuxer_stream_;
scoped_ptr<StrictMock<MockDecryptor> > decryptor_;
+ // Whether a valid Decryptor is set to the |demuxer_stream_|.
+ bool is_decryptor_set_;
scoped_ptr<StrictMock<MockDemuxerStream> > input_audio_stream_;
scoped_ptr<StrictMock<MockDemuxerStream> > input_video_stream_;
@@ -220,6 +262,7 @@ class DecryptingDemuxerStreamTest : public testing::Test {
// Constant buffers to be returned by the input demuxer streams and the
// |decryptor_|.
+ scoped_refptr<DecoderBuffer> clear_buffer_;
scoped_refptr<DecoderBuffer> encrypted_buffer_;
scoped_refptr<DecoderBuffer> decrypted_buffer_;
@@ -233,9 +276,9 @@ TEST_F(DecryptingDemuxerStreamTest, Initialize_NormalAudio) {
TEST_F(DecryptingDemuxerStreamTest, Initialize_NormalVideo) {
EXPECT_CALL(*this, RequestDecryptorNotification(_))
- .WillOnce(RunCallbackIfNotNull(decryptor_.get()));
+ .WillOnce(SetDecryptorIfNotNull(decryptor_.get(), &is_decryptor_set_));
EXPECT_CALL(*decryptor_, RegisterNewKeyCB(Decryptor::kVideo, _))
- .WillOnce(SaveArg<1>(&key_added_cb_));
+ .WillOnce(SaveArg<1>(&key_added_cb_));
VideoDecoderConfig input_config = TestVideoConfig::NormalEncrypted();
InitializeVideoAndExpectStatus(input_config, PIPELINE_OK);
@@ -260,19 +303,26 @@ TEST_F(DecryptingDemuxerStreamTest, Initialize_NormalVideo) {
TEST_F(DecryptingDemuxerStreamTest, Initialize_NullDecryptor) {
EXPECT_CALL(*this, RequestDecryptorNotification(_))
- .WillRepeatedly(RunCallbackIfNotNull(static_cast<Decryptor*>(NULL)));
+ .WillRepeatedly(SetDecryptorIfNotNull(static_cast<Decryptor*>(NULL),
+ &is_decryptor_set_));
AudioDecoderConfig input_config(kCodecVorbis, kSampleFormatPlanarF32,
CHANNEL_LAYOUT_STEREO, 44100, NULL, 0, true);
InitializeAudioAndExpectStatus(input_config, DECODER_ERROR_NOT_SUPPORTED);
}
-// Test normal read case.
+// Test normal read case where the buffer is encrypted.
TEST_F(DecryptingDemuxerStreamTest, Read_Normal) {
Initialize();
EnterNormalReadingState();
}
+// Test normal read case where the buffer is clear.
+TEST_F(DecryptingDemuxerStreamTest, Read_Clear) {
+ Initialize();
+ EnterClearReadingState();
+}
+
// Test the case where the decryptor returns error during read.
TEST_F(DecryptingDemuxerStreamTest, Read_DecryptError) {
Initialize();
@@ -362,8 +412,7 @@ TEST_F(DecryptingDemuxerStreamTest, Reset_DuringPendingDemuxerRead) {
EXPECT_CALL(*this, BufferReady(DemuxerStream::kAborted, IsNull()));
Reset();
- base::ResetAndReturn(&pending_demuxer_read_cb_).Run(DemuxerStream::kOk,
- encrypted_buffer_);
+ SatisfyPendingDemuxerReadCB(DemuxerStream::kOk);
message_loop_.RunUntilIdle();
}
@@ -406,9 +455,8 @@ TEST_F(DecryptingDemuxerStreamTest, DemuxerRead_Aborted) {
ReadAndExpectBufferReadyWith(DemuxerStream::kAborted, NULL);
}
-// Test aborted read on the input demuxer stream when the
-// DecryptingDemuxerStream is being reset.
-TEST_F(DecryptingDemuxerStreamTest, DemuxerRead_AbortedDuringReset) {
+// Test resetting when DecryptingDemuxerStream is waiting for an aborted read.
+TEST_F(DecryptingDemuxerStreamTest, Reset_DuringAbortedDemuxerRead) {
Initialize();
EnterPendingReadState();
@@ -416,8 +464,7 @@ TEST_F(DecryptingDemuxerStreamTest, DemuxerRead_AbortedDuringReset) {
EXPECT_CALL(*this, BufferReady(DemuxerStream::kAborted, IsNull()));
Reset();
- base::ResetAndReturn(&pending_demuxer_read_cb_).Run(DemuxerStream::kAborted,
- NULL);
+ SatisfyPendingDemuxerReadCB(DemuxerStream::kAborted);
message_loop_.RunUntilIdle();
}
@@ -437,4 +484,81 @@ TEST_F(DecryptingDemuxerStreamTest, DemuxerRead_ConfigChanged) {
ReadAndExpectBufferReadyWith(DemuxerStream::kConfigChanged, NULL);
}
+// Test resetting when DecryptingDemuxerStream is waiting for a config changed
+// read.
+TEST_F(DecryptingDemuxerStreamTest, Reset_DuringConfigChangedDemuxerRead) {
+ Initialize();
+ EnterPendingReadState();
+
+ // Make sure we get a |kConfigChanged| instead of a |kAborted|.
+ EXPECT_CALL(*this, BufferReady(DemuxerStream::kConfigChanged, IsNull()));
+
+ Reset();
+ SatisfyPendingDemuxerReadCB(DemuxerStream::kConfigChanged);
+ message_loop_.RunUntilIdle();
+}
+
+// Test stopping when the DecryptingDemuxerStream is in kDecryptorRequested
+// state.
+TEST_F(DecryptingDemuxerStreamTest, Stop_DuringDecryptorRequested) {
+ // One for decryptor request, one for canceling request during Reset().
+ EXPECT_CALL(*this, RequestDecryptorNotification(_))
+ .Times(2);
+ AudioDecoderConfig input_config(
+ kCodecVorbis, kSampleFormatPlanarF32, CHANNEL_LAYOUT_STEREO, 44100,
+ NULL, 0, true);
+ InitializeAudioAndExpectStatus(input_config, PIPELINE_ERROR_ABORT);
+ Stop();
+}
+
+// Test stopping when the DecryptingDemuxerStream is in kIdle state but has
+// not returned any buffer.
+TEST_F(DecryptingDemuxerStreamTest, Stop_DuringIdleAfterInitialization) {
+ Initialize();
+ Stop();
+}
+
+// Test stopping when the DecryptingDemuxerStream is in kIdle state after it
+// has returned one buffer.
+TEST_F(DecryptingDemuxerStreamTest, Stop_DuringIdleAfterReadOneBuffer) {
+ Initialize();
+ EnterNormalReadingState();
+ Stop();
+}
+
+// Test stopping when DecryptingDemuxerStream is in kPendingDemuxerRead state.
+TEST_F(DecryptingDemuxerStreamTest, Stop_DuringPendingDemuxerRead) {
+ Initialize();
+ EnterPendingReadState();
+
+ EXPECT_CALL(*this, BufferReady(DemuxerStream::kAborted, IsNull()));
+ Stop();
+}
+
+// Test stopping when the DecryptingDemuxerStream is in kPendingDecrypt state.
+TEST_F(DecryptingDemuxerStreamTest, Stop_DuringPendingDecrypt) {
+ Initialize();
+ EnterPendingDecryptState();
+
+ EXPECT_CALL(*this, BufferReady(DemuxerStream::kAborted, IsNull()));
+ Stop();
+}
+
+// Test stopping when the DecryptingDemuxerStream is in kWaitingForKey state.
+TEST_F(DecryptingDemuxerStreamTest, Stop_DuringWaitingForKey) {
+ Initialize();
+ EnterWaitingForKeyState();
+
+ EXPECT_CALL(*this, BufferReady(DemuxerStream::kAborted, IsNull()));
+ Stop();
+}
+
+// Test stopping after the DecryptingDemuxerStream has been reset.
+TEST_F(DecryptingDemuxerStreamTest, Stop_AfterReset) {
+ Initialize();
+ EnterNormalReadingState();
+ Reset();
+ Stop();
+}
+
} // namespace media
diff --git a/chromium/media/filters/decrypting_video_decoder.cc b/chromium/media/filters/decrypting_video_decoder.cc
index cb94c8ca758..b3ea2144558 100644
--- a/chromium/media/filters/decrypting_video_decoder.cc
+++ b/chromium/media/filters/decrypting_video_decoder.cc
@@ -80,7 +80,7 @@ void DecryptingVideoDecoder::Decode(const scoped_refptr<DecoderBuffer>& buffer,
// Return empty frames if decoding has finished.
if (state_ == kDecodeFinished) {
- base::ResetAndReturn(&decode_cb_).Run(kOk, VideoFrame::CreateEmptyFrame());
+ base::ResetAndReturn(&decode_cb_).Run(kOk, VideoFrame::CreateEOSFrame());
return;
}
@@ -211,7 +211,7 @@ void DecryptingVideoDecoder::DecodePendingBuffer() {
DCHECK(message_loop_->BelongsToCurrentThread());
DCHECK_EQ(state_, kPendingDecode) << state_;
TRACE_EVENT_ASYNC_BEGIN0(
- "eme", "DecryptingVideoDecoder::DecodePendingBuffer", ++trace_id_);
+ "media", "DecryptingVideoDecoder::DecodePendingBuffer", ++trace_id_);
int buffer_size = 0;
if (!pending_buffer_to_decode_->end_of_stream()) {
@@ -230,7 +230,7 @@ void DecryptingVideoDecoder::DeliverFrame(
DVLOG(3) << "DeliverFrame() - status: " << status;
DCHECK(message_loop_->BelongsToCurrentThread());
TRACE_EVENT_ASYNC_END0(
- "eme", "DecryptingVideoDecoder::DecodePendingBuffer", trace_id_);
+ "media", "DecryptingVideoDecoder::DecodePendingBuffer", trace_id_);
if (state_ == kStopped)
return;
@@ -282,7 +282,7 @@ void DecryptingVideoDecoder::DeliverFrame(
if (scoped_pending_buffer_to_decode->end_of_stream()) {
state_ = kDecodeFinished;
base::ResetAndReturn(&decode_cb_).Run(
- kOk, media::VideoFrame::CreateEmptyFrame());
+ kOk, media::VideoFrame::CreateEOSFrame());
return;
}
@@ -293,7 +293,7 @@ void DecryptingVideoDecoder::DeliverFrame(
DCHECK_EQ(status, Decryptor::kSuccess);
// No frame returned with kSuccess should be end-of-stream frame.
- DCHECK(!frame->IsEndOfStream());
+ DCHECK(!frame->end_of_stream());
state_ = kIdle;
base::ResetAndReturn(&decode_cb_).Run(kOk, frame);
}
diff --git a/chromium/media/filters/decrypting_video_decoder_unittest.cc b/chromium/media/filters/decrypting_video_decoder_unittest.cc
index adf0585d140..1e8bee9fece 100644
--- a/chromium/media/filters/decrypting_video_decoder_unittest.cc
+++ b/chromium/media/filters/decrypting_video_decoder_unittest.cc
@@ -56,7 +56,7 @@ ACTION_P2(ResetAndRunCallback, callback, param) {
}
MATCHER(IsEndOfStream, "end of stream") {
- return (arg->IsEndOfStream());
+ return (arg->end_of_stream());
}
} // namespace
@@ -74,7 +74,7 @@ class DecryptingVideoDecoderTest : public testing::Test {
decoded_video_frame_(VideoFrame::CreateBlackFrame(
TestVideoConfig::NormalCodedSize())),
null_video_frame_(scoped_refptr<VideoFrame>()),
- end_of_stream_video_frame_(VideoFrame::CreateEmptyFrame()) {
+ end_of_stream_video_frame_(VideoFrame::CreateEOSFrame()) {
EXPECT_CALL(*this, RequestDecryptorNotification(_))
.WillRepeatedly(RunCallbackIfNotNull(decryptor_.get()));
}
@@ -109,7 +109,7 @@ class DecryptingVideoDecoderTest : public testing::Test {
const scoped_refptr<VideoFrame>& video_frame) {
if (status != VideoDecoder::kOk)
EXPECT_CALL(*this, FrameReady(status, IsNull()));
- else if (video_frame.get() && video_frame->IsEndOfStream())
+ else if (video_frame.get() && video_frame->end_of_stream())
EXPECT_CALL(*this, FrameReady(status, IsEndOfStream()));
else
EXPECT_CALL(*this, FrameReady(status, video_frame));
diff --git a/chromium/media/filters/fake_demuxer_stream.cc b/chromium/media/filters/fake_demuxer_stream.cc
index 200b287b030..c6daa9f405f 100644
--- a/chromium/media/filters/fake_demuxer_stream.cc
+++ b/chromium/media/filters/fake_demuxer_stream.cc
@@ -18,12 +18,17 @@
namespace media {
-static const int kStartTimestampMs = 0;
-static const int kDurationMs = 30;
-static const int kStartWidth = 320;
-static const int kStartHeight = 240;
-static const int kWidthDelta = 4;
-static const int kHeightDelta = 3;
+const int kStartTimestampMs = 0;
+const int kDurationMs = 30;
+const int kStartWidth = 320;
+const int kStartHeight = 240;
+const int kWidthDelta = 4;
+const int kHeightDelta = 3;
+const uint8 kKeyId[] = { 0x00, 0x01, 0x02, 0x03 };
+const uint8 kIv[] = {
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
FakeDemuxerStream::FakeDemuxerStream(int num_configs,
int num_buffers_in_one_config,
@@ -142,6 +147,13 @@ void FakeDemuxerStream::DoRead() {
video_decoder_config_, current_timestamp_, duration_);
// TODO(xhwang): Output out-of-order buffers if needed.
+ if (is_encrypted_) {
+ buffer->set_decrypt_config(scoped_ptr<DecryptConfig>(
+ new DecryptConfig(std::string(kKeyId, kKeyId + arraysize(kKeyId)),
+ std::string(kIv, kIv + arraysize(kIv)),
+ 0,
+ std::vector<SubsampleEntry>())));
+ }
buffer->set_timestamp(current_timestamp_);
buffer->set_duration(duration_);
current_timestamp_ += duration_;
diff --git a/chromium/media/filters/fake_video_decoder.cc b/chromium/media/filters/fake_video_decoder.cc
index 24b7c62aa10..dbb16db0d9e 100644
--- a/chromium/media/filters/fake_video_decoder.cc
+++ b/chromium/media/filters/fake_video_decoder.cc
@@ -59,7 +59,7 @@ void FakeVideoDecoder::Decode(const scoped_refptr<DecoderBuffer>& buffer,
&FakeVideoDecoder::OnFrameDecoded, weak_this_, buffer_size, decode_cb)));
if (buffer->end_of_stream() && decoded_frames_.empty()) {
- decode_cb_.RunOrHold(kOk, VideoFrame::CreateEmptyFrame());
+ decode_cb_.RunOrHold(kOk, VideoFrame::CreateEOSFrame());
return;
}
diff --git a/chromium/media/filters/fake_video_decoder_unittest.cc b/chromium/media/filters/fake_video_decoder_unittest.cc
index c19017bfead..0aa3b5fc31d 100644
--- a/chromium/media/filters/fake_video_decoder_unittest.cc
+++ b/chromium/media/filters/fake_video_decoder_unittest.cc
@@ -63,7 +63,7 @@ class FakeVideoDecoderTest : public testing::Test {
decode_status_ = status;
frame_decoded_ = frame;
- if (frame && !frame->IsEndOfStream())
+ if (frame && !frame->end_of_stream())
num_decoded_frames_++;
}
@@ -85,7 +85,7 @@ class FakeVideoDecoderTest : public testing::Test {
EXPECT_FALSE(is_decode_pending_);
ASSERT_EQ(VideoDecoder::kOk, decode_status_);
ASSERT_TRUE(frame_decoded_);
- EXPECT_FALSE(frame_decoded_->IsEndOfStream());
+ EXPECT_FALSE(frame_decoded_->end_of_stream());
break;
case NOT_ENOUGH_DATA:
EXPECT_FALSE(is_decode_pending_);
@@ -101,7 +101,7 @@ class FakeVideoDecoderTest : public testing::Test {
EXPECT_FALSE(is_decode_pending_);
ASSERT_EQ(VideoDecoder::kOk, decode_status_);
ASSERT_TRUE(frame_decoded_);
- EXPECT_TRUE(frame_decoded_->IsEndOfStream());
+ EXPECT_TRUE(frame_decoded_->end_of_stream());
break;
}
}
@@ -139,7 +139,7 @@ class FakeVideoDecoderTest : public testing::Test {
void ReadUntilEOS() {
do {
ReadOneFrame();
- } while (frame_decoded_ && !frame_decoded_->IsEndOfStream());
+ } while (frame_decoded_ && !frame_decoded_->end_of_stream());
}
void EnterPendingReadState() {
diff --git a/chromium/media/filters/ffmpeg_audio_decoder.cc b/chromium/media/filters/ffmpeg_audio_decoder.cc
index f41c89318cc..bb64c36af62 100644
--- a/chromium/media/filters/ffmpeg_audio_decoder.cc
+++ b/chromium/media/filters/ffmpeg_audio_decoder.cc
@@ -15,6 +15,7 @@
#include "media/base/bind_to_loop.h"
#include "media/base/decoder_buffer.h"
#include "media/base/demuxer.h"
+#include "media/base/limits.h"
#include "media/base/pipeline.h"
#include "media/base/sample_format.h"
#include "media/ffmpeg/ffmpeg_common.h"
@@ -67,15 +68,13 @@ FFmpegAudioDecoder::FFmpegAudioDecoder(
: message_loop_(message_loop),
weak_factory_(this),
demuxer_stream_(NULL),
- codec_context_(NULL),
bytes_per_channel_(0),
channel_layout_(CHANNEL_LAYOUT_NONE),
channels_(0),
samples_per_second_(0),
av_sample_format_(0),
last_input_timestamp_(kNoTimestamp()),
- output_frames_to_drop_(0),
- av_frame_(NULL) {
+ output_frames_to_drop_(0) {
}
void FFmpegAudioDecoder::Initialize(
@@ -144,7 +143,7 @@ void FFmpegAudioDecoder::Reset(const base::Closure& closure) {
DCHECK(message_loop_->BelongsToCurrentThread());
base::Closure reset_cb = BindToCurrentLoop(closure);
- avcodec_flush_buffers(codec_context_);
+ avcodec_flush_buffers(codec_context_.get());
ResetTimestampState();
queued_audio_.clear();
reset_cb.Run();
@@ -165,6 +164,12 @@ int FFmpegAudioDecoder::GetAudioBuffer(AVCodecContext* codec,
AVSampleFormat format = static_cast<AVSampleFormat>(frame->format);
SampleFormat sample_format = AVSampleFormatToSampleFormat(format);
int channels = DetermineChannels(frame);
+ if ((channels <= 0) || (channels >= limits::kMaxChannels)) {
+ DLOG(ERROR) << "Requested number of channels (" << channels
+ << ") exceeds limit.";
+ return AVERROR(EINVAL);
+ }
+
int bytes_per_channel = SampleFormatToBytesPerChannel(sample_format);
if (frame->nb_samples <= 0)
return AVERROR(EINVAL);
@@ -178,6 +183,9 @@ int FFmpegAudioDecoder::GetAudioBuffer(AVCodecContext* codec,
frame->nb_samples,
format,
AudioBuffer::kChannelAlignment);
+ // Check for errors from av_samples_get_buffer_size().
+ if (buffer_size_in_bytes < 0)
+ return buffer_size_in_bytes;
int frames_required = buffer_size_in_bytes / bytes_per_channel / channels;
DCHECK_GE(frames_required, frame->nb_samples);
scoped_refptr<AudioBuffer> buffer =
@@ -271,25 +279,22 @@ void FFmpegAudioDecoder::BufferReady(
return;
}
- bool is_vorbis = codec_context_->codec_id == AV_CODEC_ID_VORBIS;
if (!input->end_of_stream()) {
- if (last_input_timestamp_ == kNoTimestamp()) {
- if (is_vorbis && (input->timestamp() < base::TimeDelta())) {
- // Dropping frames for negative timestamps as outlined in section A.2
- // in the Vorbis spec. http://xiph.org/vorbis/doc/Vorbis_I_spec.html
- output_frames_to_drop_ = floor(
- 0.5 + -input->timestamp().InSecondsF() * samples_per_second_);
- } else {
- last_input_timestamp_ = input->timestamp();
- }
- } else if (input->timestamp() != kNoTimestamp()) {
- if (input->timestamp() < last_input_timestamp_) {
- base::TimeDelta diff = input->timestamp() - last_input_timestamp_;
- DVLOG(1) << "Input timestamps are not monotonically increasing! "
- << " ts " << input->timestamp().InMicroseconds() << " us"
- << " diff " << diff.InMicroseconds() << " us";
- base::ResetAndReturn(&read_cb_).Run(kDecodeError, NULL);
- return;
+ if (last_input_timestamp_ == kNoTimestamp() &&
+ codec_context_->codec_id == AV_CODEC_ID_VORBIS &&
+ input->timestamp() < base::TimeDelta()) {
+ // Dropping frames for negative timestamps as outlined in section A.2
+ // in the Vorbis spec. http://xiph.org/vorbis/doc/Vorbis_I_spec.html
+ output_frames_to_drop_ = floor(
+ 0.5 + -input->timestamp().InSecondsF() * samples_per_second_);
+ } else {
+ if (last_input_timestamp_ != kNoTimestamp() &&
+ input->timestamp() < last_input_timestamp_) {
+ const base::TimeDelta diff = input->timestamp() - last_input_timestamp_;
+ DLOG(WARNING)
+ << "Input timestamps are not monotonically increasing! "
+ << " ts " << input->timestamp().InMicroseconds() << " us"
+ << " diff " << diff.InMicroseconds() << " us";
}
last_input_timestamp_ = input->timestamp();
@@ -328,7 +333,7 @@ bool FFmpegAudioDecoder::ConfigureDecoder() {
return false;
}
- if (codec_context_ &&
+ if (codec_context_.get() &&
(bytes_per_channel_ != config.bytes_per_channel() ||
channel_layout_ != config.channel_layout() ||
samples_per_second_ != config.samples_per_second())) {
@@ -346,22 +351,22 @@ bool FFmpegAudioDecoder::ConfigureDecoder() {
ReleaseFFmpegResources();
// Initialize AVCodecContext structure.
- codec_context_ = avcodec_alloc_context3(NULL);
- AudioDecoderConfigToAVCodecContext(config, codec_context_);
+ codec_context_.reset(avcodec_alloc_context3(NULL));
+ AudioDecoderConfigToAVCodecContext(config, codec_context_.get());
codec_context_->opaque = this;
codec_context_->get_buffer2 = GetAudioBufferImpl;
codec_context_->refcounted_frames = 1;
AVCodec* codec = avcodec_find_decoder(codec_context_->codec_id);
- if (!codec || avcodec_open2(codec_context_, codec, NULL) < 0) {
+ if (!codec || avcodec_open2(codec_context_.get(), codec, NULL) < 0) {
DLOG(ERROR) << "Could not initialize audio decoder: "
<< codec_context_->codec_id;
return false;
}
// Success!
- av_frame_ = avcodec_alloc_frame();
+ av_frame_.reset(av_frame_alloc());
channel_layout_ = config.channel_layout();
samples_per_second_ = config.samples_per_second();
output_timestamp_helper_.reset(
@@ -385,14 +390,8 @@ bool FFmpegAudioDecoder::ConfigureDecoder() {
}
void FFmpegAudioDecoder::ReleaseFFmpegResources() {
- if (codec_context_) {
- av_free(codec_context_->extradata);
- avcodec_close(codec_context_);
- av_free(codec_context_);
- }
-
- if (av_frame_)
- av_frame_free(&av_frame_);
+ codec_context_.reset();
+ av_frame_.reset();
}
void FFmpegAudioDecoder::ResetTimestampState() {
@@ -421,7 +420,7 @@ void FFmpegAudioDecoder::RunDecodeLoop(
do {
int frame_decoded = 0;
int result = avcodec_decode_audio4(
- codec_context_, av_frame_, &frame_decoded, &packet);
+ codec_context_.get(), av_frame_.get(), &frame_decoded, &packet);
if (result < 0) {
DCHECK(!input->end_of_stream())
@@ -429,14 +428,12 @@ void FFmpegAudioDecoder::RunDecodeLoop(
<< "This is quite possibly a bug in the audio decoder not handling "
<< "end of stream AVPackets correctly.";
- DLOG(ERROR)
- << "Error decoding an audio frame with timestamp: "
+ DLOG(WARNING)
+ << "Failed to decode an audio frame with timestamp: "
<< input->timestamp().InMicroseconds() << " us, duration: "
<< input->duration().InMicroseconds() << " us, packet size: "
<< input->data_size() << " bytes";
- // TODO(dalecurtis): We should return a kDecodeError here instead:
- // http://crbug.com/145276
break;
}
@@ -461,7 +458,7 @@ void FFmpegAudioDecoder::RunDecodeLoop(
scoped_refptr<AudioBuffer> output;
int decoded_frames = 0;
int original_frames = 0;
- int channels = DetermineChannels(av_frame_);
+ int channels = DetermineChannels(av_frame_.get());
if (frame_decoded) {
if (av_frame_->sample_rate != samples_per_second_ ||
channels != channels_ ||
@@ -477,7 +474,7 @@ void FFmpegAudioDecoder::RunDecodeLoop(
// This is an unrecoverable error, so bail out.
QueuedAudioBuffer queue_entry = { kDecodeError, NULL };
queued_audio_.push_back(queue_entry);
- av_frame_unref(av_frame_);
+ av_frame_unref(av_frame_.get());
break;
}
@@ -500,7 +497,7 @@ void FFmpegAudioDecoder::RunDecodeLoop(
}
decoded_frames = output->frame_count();
- av_frame_unref(av_frame_);
+ av_frame_unref(av_frame_.get());
}
// WARNING: |av_frame_| no longer has valid data at this point.
diff --git a/chromium/media/filters/ffmpeg_audio_decoder.h b/chromium/media/filters/ffmpeg_audio_decoder.h
index 44c98305f3d..296384796a2 100644
--- a/chromium/media/filters/ffmpeg_audio_decoder.h
+++ b/chromium/media/filters/ffmpeg_audio_decoder.h
@@ -8,6 +8,7 @@
#include <list>
#include "base/callback.h"
+#include "base/memory/scoped_ptr.h"
#include "base/memory/weak_ptr.h"
#include "base/time/time.h"
#include "media/base/audio_decoder.h"
@@ -25,6 +26,8 @@ namespace media {
class AudioTimestampHelper;
class DecoderBuffer;
+class ScopedPtrAVFreeContext;
+class ScopedPtrAVFreeFrame;
// Helper structure for managing multiple decoded audio frames per packet.
struct QueuedAudioBuffer {
@@ -32,7 +35,6 @@ struct QueuedAudioBuffer {
scoped_refptr<AudioBuffer> buffer;
};
-
class MEDIA_EXPORT FFmpegAudioDecoder : public AudioDecoder {
public:
explicit FFmpegAudioDecoder(
@@ -72,7 +74,7 @@ class MEDIA_EXPORT FFmpegAudioDecoder : public AudioDecoder {
DemuxerStream* demuxer_stream_;
StatisticsCB statistics_cb_;
- AVCodecContext* codec_context_;
+ scoped_ptr_malloc<AVCodecContext, ScopedPtrAVFreeContext> codec_context_;
// Decoded audio format.
int bytes_per_channel_;
@@ -92,7 +94,7 @@ class MEDIA_EXPORT FFmpegAudioDecoder : public AudioDecoder {
int output_frames_to_drop_;
// Holds decoded audio.
- AVFrame* av_frame_;
+ scoped_ptr_malloc<AVFrame, ScopedPtrAVFreeFrame> av_frame_;
ReadCB read_cb_;
diff --git a/chromium/media/filters/ffmpeg_demuxer.cc b/chromium/media/filters/ffmpeg_demuxer.cc
index 723eb5f28d9..6b8027164bd 100644
--- a/chromium/media/filters/ffmpeg_demuxer.cc
+++ b/chromium/media/filters/ffmpeg_demuxer.cc
@@ -11,13 +11,12 @@
#include "base/bind.h"
#include "base/callback.h"
#include "base/callback_helpers.h"
-#include "base/command_line.h"
#include "base/memory/scoped_ptr.h"
#include "base/message_loop/message_loop.h"
#include "base/metrics/sparse_histogram.h"
-#include "base/stl_util.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
+#include "base/sys_byteorder.h"
#include "base/task_runner_util.h"
#include "base/time/time.h"
#include "media/base/audio_decoder_config.h"
@@ -26,11 +25,11 @@
#include "media/base/decrypt_config.h"
#include "media/base/limits.h"
#include "media/base/media_log.h"
-#include "media/base/media_switches.h"
#include "media/base/video_decoder_config.h"
#include "media/ffmpeg/ffmpeg_common.h"
#include "media/filters/ffmpeg_glue.h"
#include "media/filters/ffmpeg_h264_to_annex_b_bitstream_converter.h"
+#include "media/filters/webvtt_util.h"
#include "media/webm/webm_crypto_helpers.h"
namespace media {
@@ -64,6 +63,9 @@ FFmpegDemuxerStream::FFmpegDemuxerStream(
AVStreamToVideoDecoderConfig(stream, &video_config_, true);
is_encrypted = video_config_.is_encrypted();
break;
+ case AVMEDIA_TYPE_SUBTITLE:
+ type_ = TEXT;
+ break;
default:
NOTREACHED();
break;
@@ -114,27 +116,67 @@ void FFmpegDemuxerStream::EnqueuePacket(ScopedAVPacket packet) {
// keep this generic so that other side_data types in the future can be
// handled the same way as well.
av_packet_split_side_data(packet.get());
- int side_data_size = 0;
- uint8* side_data = av_packet_get_side_data(
- packet.get(),
- AV_PKT_DATA_MATROSKA_BLOCKADDITIONAL,
- &side_data_size);
-
- // If a packet is returned by FFmpeg's av_parser_parse2() the packet will
- // reference inner memory of FFmpeg. As such we should transfer the packet
- // into memory we control.
scoped_refptr<DecoderBuffer> buffer;
- if (side_data_size > 0) {
+
+ if (type() == DemuxerStream::TEXT) {
+ int id_size = 0;
+ uint8* id_data = av_packet_get_side_data(
+ packet.get(),
+ AV_PKT_DATA_WEBVTT_IDENTIFIER,
+ &id_size);
+
+ int settings_size = 0;
+ uint8* settings_data = av_packet_get_side_data(
+ packet.get(),
+ AV_PKT_DATA_WEBVTT_SETTINGS,
+ &settings_size);
+
+ std::vector<uint8> side_data;
+ MakeSideData(id_data, id_data + id_size,
+ settings_data, settings_data + settings_size,
+ &side_data);
+
buffer = DecoderBuffer::CopyFrom(packet.get()->data, packet.get()->size,
- side_data, side_data_size);
+ side_data.data(), side_data.size());
} else {
- buffer = DecoderBuffer::CopyFrom(packet.get()->data, packet.get()->size);
+ int side_data_size = 0;
+ uint8* side_data = av_packet_get_side_data(
+ packet.get(),
+ AV_PKT_DATA_MATROSKA_BLOCKADDITIONAL,
+ &side_data_size);
+
+ // If a packet is returned by FFmpeg's av_parser_parse2() the packet will
+ // reference inner memory of FFmpeg. As such we should transfer the packet
+ // into memory we control.
+ if (side_data_size > 0) {
+ buffer = DecoderBuffer::CopyFrom(packet.get()->data, packet.get()->size,
+ side_data, side_data_size);
+ } else {
+ buffer = DecoderBuffer::CopyFrom(packet.get()->data, packet.get()->size);
+ }
+
+ int skip_samples_size = 0;
+ uint8* skip_samples = av_packet_get_side_data(packet.get(),
+ AV_PKT_DATA_SKIP_SAMPLES,
+ &skip_samples_size);
+ const int kSkipSamplesValidSize = 10;
+ const int kSkipSamplesOffset = 4;
+ if (skip_samples_size >= kSkipSamplesValidSize) {
+ int discard_padding_samples = base::ByteSwapToLE32(
+ *(reinterpret_cast<const uint32*>(skip_samples +
+ kSkipSamplesOffset)));
+ // TODO(vigneshv): Change decoder buffer to use number of samples so that
+ // this conversion can be avoided.
+ buffer->set_discard_padding(base::TimeDelta::FromMicroseconds(
+ discard_padding_samples * 1000000.0 /
+ audio_decoder_config().samples_per_second()));
+ }
}
if ((type() == DemuxerStream::AUDIO && audio_config_.is_encrypted()) ||
(type() == DemuxerStream::VIDEO && video_config_.is_encrypted())) {
scoped_ptr<DecryptConfig> config(WebMCreateDecryptConfig(
- packet->data, packet->size,
+ packet->data, packet->size,
reinterpret_cast<const uint8*>(encryption_key_id_.data()),
encryption_key_id_.size()));
if (!config)
@@ -272,6 +314,27 @@ bool FFmpegDemuxerStream::HasAvailableCapacity() {
return buffer_queue_.IsEmpty() || buffer_queue_.Duration() < kCapacity;
}
+TextKind FFmpegDemuxerStream::GetTextKind() const {
+ DCHECK_EQ(type_, DemuxerStream::TEXT);
+
+ if (stream_->disposition & AV_DISPOSITION_CAPTIONS)
+ return kTextCaptions;
+
+ if (stream_->disposition & AV_DISPOSITION_DESCRIPTIONS)
+ return kTextDescriptions;
+
+ if (stream_->disposition & AV_DISPOSITION_METADATA)
+ return kTextMetadata;
+
+ return kTextSubtitles;
+}
+
+std::string FFmpegDemuxerStream::GetMetadata(const char* key) const {
+ const AVDictionaryEntry* entry =
+ av_dict_get(stream_->metadata, key, NULL, 0);
+ return (entry == NULL || entry->value == NULL) ? "" : entry->value;
+}
+
// static
base::TimeDelta FFmpegDemuxerStream::ConvertStreamTimestamp(
const AVRational& time_base, int64 timestamp) {
@@ -300,6 +363,7 @@ FFmpegDemuxer::FFmpegDemuxer(
bitrate_(0),
start_time_(kNoTimestamp()),
audio_disabled_(false),
+ text_enabled_(false),
duration_known_(false),
url_protocol_(data_source, BindToLoop(message_loop_, base::Bind(
&FFmpegDemuxer::OnDataSourceError, base::Unretained(this)))),
@@ -345,11 +409,6 @@ void FFmpegDemuxer::Seek(base::TimeDelta time, const PipelineStatusCB& cb) {
base::Bind(&FFmpegDemuxer::OnSeekFrameDone, weak_this_, cb));
}
-void FFmpegDemuxer::SetPlaybackRate(float playback_rate) {
- DCHECK(message_loop_->BelongsToCurrentThread());
- data_source_->SetPlaybackRate(playback_rate);
-}
-
void FFmpegDemuxer::OnAudioRendererDisabled() {
DCHECK(message_loop_->BelongsToCurrentThread());
audio_disabled_ = true;
@@ -362,10 +421,12 @@ void FFmpegDemuxer::OnAudioRendererDisabled() {
}
void FFmpegDemuxer::Initialize(DemuxerHost* host,
- const PipelineStatusCB& status_cb) {
+ const PipelineStatusCB& status_cb,
+ bool enable_text_tracks) {
DCHECK(message_loop_->BelongsToCurrentThread());
host_ = host;
weak_this_ = weak_factory_.GetWeakPtr();
+ text_enabled_ = enable_text_tracks;
// TODO(scherkus): DataSource should have a host by this point,
// see http://crbug.com/122071
@@ -409,6 +470,25 @@ base::TimeDelta FFmpegDemuxer::GetStartTime() const {
return start_time_;
}
+void FFmpegDemuxer::AddTextStreams() {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+
+ for (StreamVector::size_type idx = 0; idx < streams_.size(); ++idx) {
+ FFmpegDemuxerStream* stream = streams_[idx];
+ if (stream == NULL || stream->type() != DemuxerStream::TEXT)
+ continue;
+
+ TextKind kind = stream->GetTextKind();
+ std::string title = stream->GetMetadata("title");
+ std::string language = stream->GetMetadata("language");
+
+ // TODO: Implement "id" metadata in FFMPEG.
+ // See: http://crbug.com/323183
+ host_->AddTextStream(stream, TextTrackConfig(kind, title, language,
+ std::string()));
+ }
+}
+
// Helper for calculating the bitrate of the media based on information stored
// in |format_context| or failing that the size and duration of the media.
//
@@ -527,6 +607,10 @@ void FFmpegDemuxer::OnFindStreamInfoDone(const PipelineStatusCB& status_cb,
if (!video_config.IsValidConfig())
continue;
video_stream = stream;
+ } else if (codec_type == AVMEDIA_TYPE_SUBTITLE) {
+ if (codec_context->codec_id != AV_CODEC_ID_WEBVTT || !text_enabled_) {
+ continue;
+ }
} else {
continue;
}
@@ -547,6 +631,9 @@ void FFmpegDemuxer::OnFindStreamInfoDone(const PipelineStatusCB& status_cb,
return;
}
+ if (text_enabled_)
+ AddTextStreams();
+
if (format_context->duration != static_cast<int64_t>(AV_NOPTS_VALUE)) {
// If there is a duration value in the container use that to find the
// maximum between it and the duration from A/V streams.
@@ -758,6 +845,19 @@ void FFmpegDemuxer::OnReadFrameDone(ScopedAVPacket packet, int result) {
packet.swap(new_packet);
}
+ // Special case for opus in ogg. FFmpeg is pre-trimming the codec delay
+ // from the packet timestamp. Chrome expects to handle this itself inside
+ // the decoder, so shift timestamps by the delay in this case.
+ // TODO(dalecurtis): Try to get fixed upstream. See http://crbug.com/328207
+ if (strcmp(glue_->format_context()->iformat->name, "ogg") == 0) {
+ const AVCodecContext* codec_context =
+ glue_->format_context()->streams[packet->stream_index]->codec;
+ if (codec_context->codec_id == AV_CODEC_ID_OPUS &&
+ codec_context->delay > 0) {
+ packet->pts += codec_context->delay;
+ }
+ }
+
FFmpegDemuxerStream* demuxer_stream = streams_[packet->stream_index];
demuxer_stream->EnqueuePacket(packet.Pass());
}
diff --git a/chromium/media/filters/ffmpeg_demuxer.h b/chromium/media/filters/ffmpeg_demuxer.h
index 7304beab244..69f6c969ab0 100644
--- a/chromium/media/filters/ffmpeg_demuxer.h
+++ b/chromium/media/filters/ffmpeg_demuxer.h
@@ -34,6 +34,7 @@
#include "media/base/decoder_buffer_queue.h"
#include "media/base/demuxer.h"
#include "media/base/pipeline.h"
+#include "media/base/text_track_config.h"
#include "media/base/video_decoder_config.h"
#include "media/filters/blocking_url_protocol.h"
@@ -93,6 +94,12 @@ class FFmpegDemuxerStream : public DemuxerStream {
// Returns true if this stream has capacity for additional data.
bool HasAvailableCapacity();
+ TextKind GetTextKind() const;
+
+ // Returns the value associated with |key| in the metadata for the avstream.
+ // Returns an empty string if the key is not present.
+ std::string GetMetadata(const char* key) const;
+
private:
friend class FFmpegDemuxerTest;
@@ -136,11 +143,11 @@ class MEDIA_EXPORT FFmpegDemuxer : public Demuxer {
// Demuxer implementation.
virtual void Initialize(DemuxerHost* host,
- const PipelineStatusCB& status_cb) OVERRIDE;
+ const PipelineStatusCB& status_cb,
+ bool enable_text_tracks) OVERRIDE;
virtual void Stop(const base::Closure& callback) OVERRIDE;
virtual void Seek(base::TimeDelta time, const PipelineStatusCB& cb) OVERRIDE;
virtual void OnAudioRendererDisabled() OVERRIDE;
- virtual void SetPlaybackRate(float playback_rate) OVERRIDE;
virtual DemuxerStream* GetStream(DemuxerStream::Type type) OVERRIDE;
virtual base::TimeDelta GetStartTime() const OVERRIDE;
@@ -185,6 +192,10 @@ class MEDIA_EXPORT FFmpegDemuxer : public Demuxer {
// FFmpegDemuxerStream.
FFmpegDemuxerStream* GetFFmpegStream(DemuxerStream::Type type) const;
+ // Called after the streams have been collected from the media, to allow
+ // the text renderer to bind each text stream to the cue rendering engine.
+ void AddTextStreams();
+
DemuxerHost* host_;
scoped_refptr<base::MessageLoopProxy> message_loop_;
@@ -234,6 +245,9 @@ class MEDIA_EXPORT FFmpegDemuxer : public Demuxer {
// drops packets destined for AUDIO demuxer streams on the floor).
bool audio_disabled_;
+ // Whether text streams have been enabled for this demuxer.
+ bool text_enabled_;
+
// Set if we know duration of the audio stream. Used when processing end of
// stream -- at this moment we definitely know duration.
bool duration_known_;
diff --git a/chromium/media/filters/ffmpeg_demuxer_unittest.cc b/chromium/media/filters/ffmpeg_demuxer_unittest.cc
index f5b0e978fe4..7c6fcb5b11a 100644
--- a/chromium/media/filters/ffmpeg_demuxer_unittest.cc
+++ b/chromium/media/filters/ffmpeg_demuxer_unittest.cc
@@ -83,6 +83,7 @@ class FFmpegDemuxerTest : public testing::Test {
Demuxer::NeedKeyCB need_key_cb =
base::Bind(&FFmpegDemuxerTest::NeedKeyCB, base::Unretained(this));
+
demuxer_.reset(new FFmpegDemuxer(message_loop_.message_loop_proxy(),
data_source_.get(),
need_key_cb,
@@ -91,13 +92,17 @@ class FFmpegDemuxerTest : public testing::Test {
MOCK_METHOD1(CheckPoint, void(int v));
- void InitializeDemuxer() {
+ void InitializeDemuxerText(bool enable_text) {
EXPECT_CALL(host_, SetDuration(_));
WaitableMessageLoopEvent event;
- demuxer_->Initialize(&host_, event.GetPipelineStatusCB());
+ demuxer_->Initialize(&host_, event.GetPipelineStatusCB(), enable_text);
event.RunAndWaitForStatus(PIPELINE_OK);
}
+ void InitializeDemuxer() {
+ InitializeDemuxerText(false);
+ }
+
MOCK_METHOD2(OnReadDoneCalled, void(int, int64));
// Verifies that |buffer| has a specific |size| and |timestamp|.
@@ -199,7 +204,7 @@ TEST_F(FFmpegDemuxerTest, Initialize_OpenFails) {
// Simulate avformat_open_input() failing.
CreateDemuxer("ten_byte_file");
WaitableMessageLoopEvent event;
- demuxer_->Initialize(&host_, event.GetPipelineStatusCB());
+ demuxer_->Initialize(&host_, event.GetPipelineStatusCB(), true);
event.RunAndWaitForStatus(DEMUXER_ERROR_COULD_NOT_OPEN);
}
@@ -217,7 +222,7 @@ TEST_F(FFmpegDemuxerTest, Initialize_NoStreams) {
// Open a file with no streams whatsoever.
CreateDemuxer("no_streams.webm");
WaitableMessageLoopEvent event;
- demuxer_->Initialize(&host_, event.GetPipelineStatusCB());
+ demuxer_->Initialize(&host_, event.GetPipelineStatusCB(), true);
event.RunAndWaitForStatus(DEMUXER_ERROR_NO_SUPPORTED_STREAMS);
}
@@ -225,7 +230,7 @@ TEST_F(FFmpegDemuxerTest, Initialize_NoAudioVideo) {
// Open a file containing streams but none of which are audio/video streams.
CreateDemuxer("no_audio_video.webm");
WaitableMessageLoopEvent event;
- demuxer_->Initialize(&host_, event.GetPipelineStatusCB());
+ demuxer_->Initialize(&host_, event.GetPipelineStatusCB(), true);
event.RunAndWaitForStatus(DEMUXER_ERROR_NO_SUPPORTED_STREAMS);
}
@@ -298,6 +303,36 @@ TEST_F(FFmpegDemuxerTest, Initialize_Multitrack) {
EXPECT_FALSE(demuxer_->GetStream(DemuxerStream::UNKNOWN));
}
+TEST_F(FFmpegDemuxerTest, Initialize_MultitrackText) {
+ // Open a file containing the following streams:
+ // Stream #0: Video (VP8)
+ // Stream #1: Audio (Vorbis)
+ // Stream #2: Text (WebVTT)
+
+ CreateDemuxer("bear-vp8-webvtt.webm");
+ DemuxerStream* text_stream = NULL;
+ EXPECT_CALL(host_, AddTextStream(_, _))
+ .WillOnce(SaveArg<0>(&text_stream));
+ InitializeDemuxerText(true);
+ ASSERT_TRUE(text_stream);
+ EXPECT_EQ(DemuxerStream::TEXT, text_stream->type());
+
+ // Video stream should be VP8.
+ DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::VIDEO);
+ ASSERT_TRUE(stream);
+ EXPECT_EQ(DemuxerStream::VIDEO, stream->type());
+ EXPECT_EQ(kCodecVP8, stream->video_decoder_config().codec());
+
+ // Audio stream should be Vorbis.
+ stream = demuxer_->GetStream(DemuxerStream::AUDIO);
+ ASSERT_TRUE(stream);
+ EXPECT_EQ(DemuxerStream::AUDIO, stream->type());
+ EXPECT_EQ(kCodecVorbis, stream->audio_decoder_config().codec());
+
+ // Unknown stream should never be present.
+ EXPECT_FALSE(demuxer_->GetStream(DemuxerStream::UNKNOWN));
+}
+
TEST_F(FFmpegDemuxerTest, Initialize_Encrypted) {
EXPECT_CALL(*this, NeedKeyCBMock(kWebMEncryptInitDataType, NotNull(),
DecryptConfig::kDecryptionKeySize))
@@ -337,6 +372,23 @@ TEST_F(FFmpegDemuxerTest, Read_Video) {
message_loop_.Run();
}
+TEST_F(FFmpegDemuxerTest, Read_Text) {
+ // We test that on a successful text packet read.
+ CreateDemuxer("bear-vp8-webvtt.webm");
+ DemuxerStream* text_stream = NULL;
+ EXPECT_CALL(host_, AddTextStream(_, _))
+ .WillOnce(SaveArg<0>(&text_stream));
+ InitializeDemuxerText(true);
+ ASSERT_TRUE(text_stream);
+ EXPECT_EQ(DemuxerStream::TEXT, text_stream->type());
+
+ text_stream->Read(NewReadCB(FROM_HERE, 31, 0));
+ message_loop_.Run();
+
+ text_stream->Read(NewReadCB(FROM_HERE, 19, 500000));
+ message_loop_.Run();
+}
+
TEST_F(FFmpegDemuxerTest, Read_VideoNonZeroStart) {
// Test the start time is the first timestamp of the video and audio stream.
CreateDemuxer("nonzero-start-time.webm");
@@ -365,6 +417,26 @@ TEST_F(FFmpegDemuxerTest, Read_EndOfStream) {
ReadUntilEndOfStream();
}
+TEST_F(FFmpegDemuxerTest, Read_EndOfStreamText) {
+ // Verify that end of stream buffers are created.
+ CreateDemuxer("bear-vp8-webvtt.webm");
+ DemuxerStream* text_stream = NULL;
+ EXPECT_CALL(host_, AddTextStream(_, _))
+ .WillOnce(SaveArg<0>(&text_stream));
+ InitializeDemuxerText(true);
+ ASSERT_TRUE(text_stream);
+ EXPECT_EQ(DemuxerStream::TEXT, text_stream->type());
+
+ bool got_eos_buffer = false;
+ const int kMaxBuffers = 10;
+ for (int i = 0; !got_eos_buffer && i < kMaxBuffers; i++) {
+ text_stream->Read(base::Bind(&EosOnReadDone, &got_eos_buffer));
+ message_loop_.Run();
+ }
+
+ EXPECT_TRUE(got_eos_buffer);
+}
+
TEST_F(FFmpegDemuxerTest, Read_EndOfStream_NoDuration) {
// Verify that end of stream buffers are created.
CreateDemuxer("bear-320x240.webm");
@@ -413,6 +485,58 @@ TEST_F(FFmpegDemuxerTest, Seek) {
message_loop_.Run();
}
+TEST_F(FFmpegDemuxerTest, SeekText) {
+ // We're testing that the demuxer frees all queued packets when it receives
+ // a Seek().
+ CreateDemuxer("bear-vp8-webvtt.webm");
+ DemuxerStream* text_stream = NULL;
+ EXPECT_CALL(host_, AddTextStream(_, _))
+ .WillOnce(SaveArg<0>(&text_stream));
+ InitializeDemuxerText(true);
+ ASSERT_TRUE(text_stream);
+ EXPECT_EQ(DemuxerStream::TEXT, text_stream->type());
+
+ // Get our streams.
+ DemuxerStream* video = demuxer_->GetStream(DemuxerStream::VIDEO);
+ DemuxerStream* audio = demuxer_->GetStream(DemuxerStream::AUDIO);
+ ASSERT_TRUE(video);
+ ASSERT_TRUE(audio);
+
+ // Read a text packet and release it.
+ text_stream->Read(NewReadCB(FROM_HERE, 31, 0));
+ message_loop_.Run();
+
+ // Issue a simple forward seek, which should discard queued packets.
+ WaitableMessageLoopEvent event;
+ demuxer_->Seek(base::TimeDelta::FromMicroseconds(1000000),
+ event.GetPipelineStatusCB());
+ event.RunAndWaitForStatus(PIPELINE_OK);
+
+ // Audio read #1.
+ audio->Read(NewReadCB(FROM_HERE, 145, 803000));
+ message_loop_.Run();
+
+ // Audio read #2.
+ audio->Read(NewReadCB(FROM_HERE, 148, 826000));
+ message_loop_.Run();
+
+ // Video read #1.
+ video->Read(NewReadCB(FROM_HERE, 5425, 801000));
+ message_loop_.Run();
+
+ // Video read #2.
+ video->Read(NewReadCB(FROM_HERE, 1906, 834000));
+ message_loop_.Run();
+
+ // Text read #1.
+ text_stream->Read(NewReadCB(FROM_HERE, 19, 500000));
+ message_loop_.Run();
+
+ // Text read #2.
+ text_stream->Read(NewReadCB(FROM_HERE, 19, 1000000));
+ message_loop_.Run();
+}
+
class MockReadCB {
public:
MockReadCB() {}
diff --git a/chromium/media/filters/ffmpeg_video_decoder.cc b/chromium/media/filters/ffmpeg_video_decoder.cc
index d03648c9b13..b8757657548 100644
--- a/chromium/media/filters/ffmpeg_video_decoder.cc
+++ b/chromium/media/filters/ffmpeg_video_decoder.cc
@@ -58,9 +58,7 @@ FFmpegVideoDecoder::FFmpegVideoDecoder(
const scoped_refptr<base::MessageLoopProxy>& message_loop)
: message_loop_(message_loop),
weak_factory_(this),
- state_(kUninitialized),
- codec_context_(NULL),
- av_frame_(NULL) {
+ state_(kUninitialized) {
}
int FFmpegVideoDecoder::GetVideoBuffer(AVCodecContext* codec_context,
@@ -71,9 +69,10 @@ int FFmpegVideoDecoder::GetVideoBuffer(AVCodecContext* codec_context,
// updated width/height/pix_fmt, which can change for adaptive
// content.
VideoFrame::Format format = PixelFormatToVideoFormat(codec_context->pix_fmt);
- if (format == VideoFrame::INVALID)
+ if (format == VideoFrame::UNKNOWN)
return AVERROR(EINVAL);
- DCHECK(format == VideoFrame::YV12 || format == VideoFrame::YV16);
+ DCHECK(format == VideoFrame::YV12 || format == VideoFrame::YV16 ||
+ format == VideoFrame::YV12J);
gfx::Size size(codec_context->width, codec_context->height);
int ret;
@@ -93,8 +92,8 @@ int FFmpegVideoDecoder::GetVideoBuffer(AVCodecContext* codec_context,
return AVERROR(EINVAL);
scoped_refptr<VideoFrame> video_frame =
- VideoFrame::CreateFrame(format, size, gfx::Rect(size), natural_size,
- kNoTimestamp());
+ frame_pool_.CreateFrame(format, size, gfx::Rect(size),
+ natural_size, kNoTimestamp());
for (int i = 0; i < 3; i++) {
frame->base[i] = video_frame->data(i);
@@ -105,8 +104,6 @@ int FFmpegVideoDecoder::GetVideoBuffer(AVCodecContext* codec_context,
frame->opaque = NULL;
video_frame.swap(reinterpret_cast<VideoFrame**>(&frame->opaque));
frame->type = FF_BUFFER_TYPE_USER;
- frame->pkt_pts = codec_context->pkt ? codec_context->pkt->pts :
- AV_NOPTS_VALUE;
frame->width = codec_context->width;
frame->height = codec_context->height;
frame->format = codec_context->pix_fmt;
@@ -167,7 +164,7 @@ void FFmpegVideoDecoder::Decode(const scoped_refptr<DecoderBuffer>& buffer,
// Return empty frames if decoding has finished.
if (state_ == kDecodeFinished) {
- base::ResetAndReturn(&decode_cb_).Run(kOk, VideoFrame::CreateEmptyFrame());
+ base::ResetAndReturn(&decode_cb_).Run(kOk, VideoFrame::CreateEOSFrame());
return;
}
@@ -189,7 +186,7 @@ void FFmpegVideoDecoder::Reset(const base::Closure& closure) {
void FFmpegVideoDecoder::DoReset() {
DCHECK(decode_cb_.is_null());
- avcodec_flush_buffers(codec_context_);
+ avcodec_flush_buffers(codec_context_.get());
state_ = kNormal;
base::ResetAndReturn(&reset_cb_).Run();
}
@@ -273,7 +270,7 @@ void FFmpegVideoDecoder::DecodeBuffer(
DCHECK(buffer->end_of_stream());
state_ = kDecodeFinished;
base::ResetAndReturn(&decode_cb_)
- .Run(kOk, VideoFrame::CreateEmptyFrame());
+ .Run(kOk, VideoFrame::CreateEOSFrame());
return;
}
@@ -290,7 +287,7 @@ bool FFmpegVideoDecoder::FFmpegDecode(
DCHECK(video_frame);
// Reset frame to default values.
- avcodec_get_frame_defaults(av_frame_);
+ avcodec_get_frame_defaults(av_frame_.get());
// Create a packet for input data.
// Due to FFmpeg API changes we no longer have const read-only pointers.
@@ -312,8 +309,8 @@ bool FFmpegVideoDecoder::FFmpegDecode(
}
int frame_decoded = 0;
- int result = avcodec_decode_video2(codec_context_,
- av_frame_,
+ int result = avcodec_decode_video2(codec_context_.get(),
+ av_frame_.get(),
&frame_decoded,
&packet);
// Log the problem if we can't decode a video frame and exit early.
@@ -356,16 +353,8 @@ bool FFmpegVideoDecoder::FFmpegDecode(
}
void FFmpegVideoDecoder::ReleaseFFmpegResources() {
- if (codec_context_) {
- av_free(codec_context_->extradata);
- avcodec_close(codec_context_);
- av_free(codec_context_);
- codec_context_ = NULL;
- }
- if (av_frame_) {
- av_free(av_frame_);
- av_frame_ = NULL;
- }
+ codec_context_.reset();
+ av_frame_.reset();
}
bool FFmpegVideoDecoder::ConfigureDecoder() {
@@ -373,8 +362,8 @@ bool FFmpegVideoDecoder::ConfigureDecoder() {
ReleaseFFmpegResources();
// Initialize AVCodecContext structure.
- codec_context_ = avcodec_alloc_context3(NULL);
- VideoDecoderConfigToAVCodecContext(config_, codec_context_);
+ codec_context_.reset(avcodec_alloc_context3(NULL));
+ VideoDecoderConfigToAVCodecContext(config_, codec_context_.get());
// Enable motion vector search (potentially slow), strong deblocking filter
// for damaged macroblocks, and set our error detection sensitivity.
@@ -386,12 +375,12 @@ bool FFmpegVideoDecoder::ConfigureDecoder() {
codec_context_->release_buffer = ReleaseVideoBufferImpl;
AVCodec* codec = avcodec_find_decoder(codec_context_->codec_id);
- if (!codec || avcodec_open2(codec_context_, codec, NULL) < 0) {
+ if (!codec || avcodec_open2(codec_context_.get(), codec, NULL) < 0) {
ReleaseFFmpegResources();
return false;
}
- av_frame_ = avcodec_alloc_frame();
+ av_frame_.reset(av_frame_alloc());
return true;
}
diff --git a/chromium/media/filters/ffmpeg_video_decoder.h b/chromium/media/filters/ffmpeg_video_decoder.h
index 1f032266951..28bb4e0d0cc 100644
--- a/chromium/media/filters/ffmpeg_video_decoder.h
+++ b/chromium/media/filters/ffmpeg_video_decoder.h
@@ -8,9 +8,11 @@
#include <list>
#include "base/callback.h"
+#include "base/memory/scoped_ptr.h"
#include "base/memory/weak_ptr.h"
#include "media/base/video_decoder.h"
#include "media/base/video_decoder_config.h"
+#include "media/base/video_frame_pool.h"
struct AVCodecContext;
struct AVFrame;
@@ -22,6 +24,8 @@ class MessageLoopProxy;
namespace media {
class DecoderBuffer;
+class ScopedPtrAVFreeContext;
+class ScopedPtrAVFreeFrame;
class MEDIA_EXPORT FFmpegVideoDecoder : public VideoDecoder {
public:
@@ -77,11 +81,13 @@ class MEDIA_EXPORT FFmpegVideoDecoder : public VideoDecoder {
base::Closure reset_cb_;
// FFmpeg structures owned by this object.
- AVCodecContext* codec_context_;
- AVFrame* av_frame_;
+ scoped_ptr_malloc<AVCodecContext, ScopedPtrAVFreeContext> codec_context_;
+ scoped_ptr_malloc<AVFrame, ScopedPtrAVFreeFrame> av_frame_;
VideoDecoderConfig config_;
+ VideoFramePool frame_pool_;
+
DISALLOW_COPY_AND_ASSIGN(FFmpegVideoDecoder);
};
diff --git a/chromium/media/filters/ffmpeg_video_decoder_unittest.cc b/chromium/media/filters/ffmpeg_video_decoder_unittest.cc
index 23ee1961a75..9663dd13604 100644
--- a/chromium/media/filters/ffmpeg_video_decoder_unittest.cc
+++ b/chromium/media/filters/ffmpeg_video_decoder_unittest.cc
@@ -99,7 +99,7 @@ class FFmpegVideoDecoderTest : public testing::Test {
EXPECT_EQ(VideoDecoder::kOk, status);
ASSERT_TRUE(video_frame.get());
- EXPECT_FALSE(video_frame->IsEndOfStream());
+ EXPECT_FALSE(video_frame->end_of_stream());
}
// Sets up expectations and actions to put FFmpegVideoDecoder in an end
@@ -110,7 +110,7 @@ class FFmpegVideoDecoderTest : public testing::Test {
DecodeSingleFrame(end_of_stream_buffer_, &status, &video_frame);
EXPECT_EQ(VideoDecoder::kOk, status);
ASSERT_TRUE(video_frame.get());
- EXPECT_TRUE(video_frame->IsEndOfStream());
+ EXPECT_TRUE(video_frame->end_of_stream());
}
typedef std::vector<scoped_refptr<DecoderBuffer> > InputBuffers;
@@ -140,7 +140,7 @@ class FFmpegVideoDecoderTest : public testing::Test {
switch (status) {
case VideoDecoder::kOk:
DCHECK(frame);
- if (!frame->IsEndOfStream()) {
+ if (!frame->end_of_stream()) {
output_frames->push_back(frame);
continue;
} else { // EOS
@@ -177,7 +177,7 @@ class FFmpegVideoDecoderTest : public testing::Test {
if (output_frames.size() == 1U)
*video_frame = output_frames[0];
else
- *video_frame = VideoFrame::CreateEmptyFrame();
+ *video_frame = VideoFrame::CreateEOSFrame();
}
// Decodes |i_frame_buffer_| and then decodes the data contained in
@@ -253,7 +253,7 @@ TEST_F(FFmpegVideoDecoderTest, Initialize_UnsupportedDecoder) {
TEST_F(FFmpegVideoDecoderTest, Initialize_UnsupportedPixelFormat) {
// Ensure decoder handles unsupported pixel formats without crashing.
VideoDecoderConfig config(kCodecVP8, VIDEO_CODEC_PROFILE_UNKNOWN,
- VideoFrame::INVALID,
+ VideoFrame::UNKNOWN,
kCodedSize, kVisibleRect, kNaturalSize,
NULL, 0, false);
InitializeWithConfigAndStatus(config, DECODER_ERROR_NOT_SUPPORTED);
@@ -359,7 +359,7 @@ TEST_F(FFmpegVideoDecoderTest, DecodeFrame_Normal) {
EXPECT_EQ(VideoDecoder::kOk, status);
ASSERT_TRUE(video_frame.get());
- EXPECT_FALSE(video_frame->IsEndOfStream());
+ EXPECT_FALSE(video_frame->end_of_stream());
}
// Verify current behavior for 0 byte frames. FFmpeg simply ignores
@@ -381,8 +381,8 @@ TEST_F(FFmpegVideoDecoderTest, DecodeFrame_0ByteFrame) {
EXPECT_EQ(VideoDecoder::kOk, status);
ASSERT_EQ(2U, output_frames.size());
- EXPECT_FALSE(output_frames[0]->IsEndOfStream());
- EXPECT_FALSE(output_frames[1]->IsEndOfStream());
+ EXPECT_FALSE(output_frames[0]->end_of_stream());
+ EXPECT_FALSE(output_frames[1]->end_of_stream());
}
TEST_F(FFmpegVideoDecoderTest, DecodeFrame_DecodeError) {
@@ -422,7 +422,7 @@ TEST_F(FFmpegVideoDecoderTest, DecodeFrame_DecodeErrorAtEndOfStream) {
EXPECT_EQ(VideoDecoder::kOk, status);
ASSERT_TRUE(video_frame.get());
- EXPECT_TRUE(video_frame->IsEndOfStream());
+ EXPECT_TRUE(video_frame->end_of_stream());
}
// Decode |i_frame_buffer_| and then a frame with a larger width and verify
diff --git a/chromium/media/filters/gpu_video_accelerator_factories.h b/chromium/media/filters/gpu_video_accelerator_factories.h
index 3ee79ac6a5c..c152c2a4bda 100644
--- a/chromium/media/filters/gpu_video_accelerator_factories.h
+++ b/chromium/media/filters/gpu_video_accelerator_factories.h
@@ -45,7 +45,6 @@ class MEDIA_EXPORT GpuVideoAcceleratorFactories
// Read pixels from a native texture and store into |pixels| as RGBA.
virtual void ReadPixels(uint32 texture_id,
- uint32 texture_target,
const gfx::Size& size,
const SkBitmap& pixels) = 0;
diff --git a/chromium/media/filters/gpu_video_decoder.cc b/chromium/media/filters/gpu_video_decoder.cc
index 273542e85ee..6f2fe93c0ab 100644
--- a/chromium/media/filters/gpu_video_decoder.cc
+++ b/chromium/media/filters/gpu_video_decoder.cc
@@ -10,6 +10,7 @@
#include "base/callback_helpers.h"
#include "base/cpu.h"
#include "base/message_loop/message_loop.h"
+#include "base/metrics/histogram.h"
#include "base/stl_util.h"
#include "base/task_runner_util.h"
#include "media/base/bind_to_loop.h"
@@ -91,7 +92,7 @@ void GpuVideoDecoder::Reset(const base::Closure& closure) {
}
if (!pending_decode_cb_.is_null())
- EnqueueFrameAndTriggerFrameDelivery(VideoFrame::CreateEmptyFrame());
+ EnqueueFrameAndTriggerFrameDelivery(VideoFrame::CreateEOSFrame());
DCHECK(pending_reset_cb_.is_null());
pending_reset_cb_ = BindToCurrentLoop(closure);
@@ -104,7 +105,7 @@ void GpuVideoDecoder::Stop(const base::Closure& closure) {
if (vda_)
DestroyVDA();
if (!pending_decode_cb_.is_null())
- EnqueueFrameAndTriggerFrameDelivery(VideoFrame::CreateEmptyFrame());
+ EnqueueFrameAndTriggerFrameDelivery(VideoFrame::CreateEOSFrame());
if (!pending_reset_cb_.is_null())
base::ResetAndReturn(&pending_reset_cb_).Run();
BindToCurrentLoop(closure).Run();
@@ -126,6 +127,17 @@ static bool IsCodedSizeSupported(const gfx::Size& coded_size) {
return os_large_video_support && hw_large_video_support;
}
+// Report |status| to UMA and run |cb| with it. This is super-specific to the
+// UMA stat reported because the UMA_HISTOGRAM_ENUMERATION API requires a
+// callsite to always be called with the same stat name (can't parameterize it).
+static void ReportGpuVideoDecoderInitializeStatusToUMAAndRunCB(
+ const PipelineStatusCB& cb,
+ PipelineStatus status) {
+ UMA_HISTOGRAM_ENUMERATION(
+ "Media.GpuVideoDecoderInitializeStatus", status, PIPELINE_STATUS_MAX);
+ cb.Run(status);
+}
+
void GpuVideoDecoder::Initialize(const VideoDecoderConfig& config,
const PipelineStatusCB& orig_status_cb) {
DVLOG(3) << "Initialize()";
@@ -135,9 +147,9 @@ void GpuVideoDecoder::Initialize(const VideoDecoderConfig& config,
weak_this_ = weak_factory_.GetWeakPtr();
- PipelineStatusCB status_cb = CreateUMAReportingPipelineCB(
- "Media.GpuVideoDecoderInitializeStatus",
- BindToCurrentLoop(orig_status_cb));
+ PipelineStatusCB status_cb =
+ base::Bind(&ReportGpuVideoDecoderInitializeStatusToUMAAndRunCB,
+ BindToCurrentLoop(orig_status_cb));
bool previously_initialized = config_.IsValidConfig();
#if !defined(OS_CHROMEOS) && !defined(OS_WIN)
@@ -188,20 +200,14 @@ void GpuVideoDecoder::Initialize(const VideoDecoderConfig& config,
status_cb.Run(PIPELINE_OK);
}
-void GpuVideoDecoder::DestroyTextures() {
- std::map<int32, PictureBuffer>::iterator it;
-
- for (it = assigned_picture_buffers_.begin();
- it != assigned_picture_buffers_.end(); ++it) {
+void GpuVideoDecoder::DestroyPictureBuffers(PictureBufferMap* buffers) {
+ DCHECK(gvd_loop_proxy_->BelongsToCurrentThread());
+ for (PictureBufferMap::iterator it = buffers->begin(); it != buffers->end();
+ ++it) {
factories_->DeleteTexture(it->second.texture_id());
}
- assigned_picture_buffers_.clear();
- for (it = dismissed_picture_buffers_.begin();
- it != dismissed_picture_buffers_.end(); ++it) {
- factories_->DeleteTexture(it->second.texture_id());
- }
- dismissed_picture_buffers_.clear();
+ buffers->clear();
}
void GpuVideoDecoder::DestroyVDA() {
@@ -210,7 +216,9 @@ void GpuVideoDecoder::DestroyVDA() {
if (vda_)
vda_.release()->Destroy();
- DestroyTextures();
+ DestroyPictureBuffers(&assigned_picture_buffers_);
+ // Not destroying PictureBuffers in |dismissed_picture_buffers_| yet, since
+ // their textures may still be in use by the user of this GpuVideoDecoder.
}
void GpuVideoDecoder::Decode(const scoped_refptr<DecoderBuffer>& buffer,
@@ -335,7 +343,9 @@ bool GpuVideoDecoder::NeedsBitstreamConversion() const {
bool GpuVideoDecoder::CanReadWithoutStalling() const {
DCHECK(gvd_loop_proxy_->BelongsToCurrentThread());
- return available_pictures_ > 0 || !ready_video_frames_.empty();
+ return
+ next_picture_buffer_id_ == 0 || // Decode() will ProvidePictureBuffers().
+ available_pictures_ > 0 || !ready_video_frames_.empty();
}
void GpuVideoDecoder::NotifyInitializeDone() {
@@ -386,8 +396,7 @@ void GpuVideoDecoder::DismissPictureBuffer(int32 id) {
DVLOG(3) << "DismissPictureBuffer(" << id << ")";
DCHECK(gvd_loop_proxy_->BelongsToCurrentThread());
- std::map<int32, PictureBuffer>::iterator it =
- assigned_picture_buffers_.find(id);
+ PictureBufferMap::iterator it = assigned_picture_buffers_.find(id);
if (it == assigned_picture_buffers_.end()) {
NOTREACHED() << "Missing picture buffer: " << id;
return;
@@ -416,7 +425,7 @@ void GpuVideoDecoder::PictureReady(const media::Picture& picture) {
DVLOG(3) << "PictureReady()";
DCHECK(gvd_loop_proxy_->BelongsToCurrentThread());
- std::map<int32, PictureBuffer>::iterator it =
+ PictureBufferMap::iterator it =
assigned_picture_buffers_.find(picture.picture_buffer_id());
if (it == assigned_picture_buffers_.end()) {
NOTREACHED() << "Missing picture buffer: " << picture.picture_buffer_id();
@@ -434,12 +443,12 @@ void GpuVideoDecoder::PictureReady(const media::Picture& picture) {
DCHECK(decoder_texture_target_);
scoped_refptr<VideoFrame> frame(VideoFrame::WrapNativeTexture(
- new VideoFrame::MailboxHolder(
+ make_scoped_ptr(new VideoFrame::MailboxHolder(
pb.texture_mailbox(),
0, // sync_point
BindToCurrentLoop(base::Bind(&GpuVideoDecoder::ReusePictureBuffer,
weak_this_,
- picture.picture_buffer_id()))),
+ picture.picture_buffer_id())))),
decoder_texture_target_,
pb.size(),
visible_rect,
@@ -448,7 +457,6 @@ void GpuVideoDecoder::PictureReady(const media::Picture& picture) {
base::Bind(&GpuVideoAcceleratorFactories::ReadPixels,
factories_,
pb.texture_id(),
- decoder_texture_target_,
gfx::Size(visible_rect.width(), visible_rect.height())),
base::Closure()));
CHECK_GT(available_pictures_, 0);
@@ -495,7 +503,7 @@ void GpuVideoDecoder::ReusePictureBuffer(int64 picture_buffer_id,
size_t num_erased = picture_buffers_at_display_.erase(picture_buffer_id);
DCHECK(num_erased);
- std::map<int32, PictureBuffer>::iterator it =
+ PictureBufferMap::iterator it =
assigned_picture_buffers_.find(picture_buffer_id);
if (it == assigned_picture_buffers_.end()) {
@@ -556,6 +564,7 @@ void GpuVideoDecoder::NotifyEndOfBitstreamBuffer(int32 id) {
}
GpuVideoDecoder::~GpuVideoDecoder() {
+ DCHECK(gvd_loop_proxy_->BelongsToCurrentThread());
DCHECK(!vda_.get()); // Stop should have been already called.
DCHECK(pending_decode_cb_.is_null());
for (size_t i = 0; i < available_shm_segments_.size(); ++i) {
@@ -570,7 +579,8 @@ GpuVideoDecoder::~GpuVideoDecoder() {
}
bitstream_buffers_in_decoder_.clear();
- DestroyTextures();
+ DestroyPictureBuffers(&assigned_picture_buffers_);
+ DestroyPictureBuffers(&dismissed_picture_buffers_);
}
void GpuVideoDecoder::NotifyFlushDone() {
@@ -578,7 +588,7 @@ void GpuVideoDecoder::NotifyFlushDone() {
DCHECK(gvd_loop_proxy_->BelongsToCurrentThread());
DCHECK_EQ(state_, kDrainingDecoder);
state_ = kDecoderDrained;
- EnqueueFrameAndTriggerFrameDelivery(VideoFrame::CreateEmptyFrame());
+ EnqueueFrameAndTriggerFrameDelivery(VideoFrame::CreateEOSFrame());
}
void GpuVideoDecoder::NotifyResetDone() {
@@ -594,7 +604,7 @@ void GpuVideoDecoder::NotifyResetDone() {
base::ResetAndReturn(&pending_reset_cb_).Run();
if (!pending_decode_cb_.is_null())
- EnqueueFrameAndTriggerFrameDelivery(VideoFrame::CreateEmptyFrame());
+ EnqueueFrameAndTriggerFrameDelivery(VideoFrame::CreateEOSFrame());
}
void GpuVideoDecoder::NotifyError(media::VideoDecodeAccelerator::Error error) {
diff --git a/chromium/media/filters/gpu_video_decoder.h b/chromium/media/filters/gpu_video_decoder.h
index f7fff52e3fa..5f43d84abbc 100644
--- a/chromium/media/filters/gpu_video_decoder.h
+++ b/chromium/media/filters/gpu_video_decoder.h
@@ -74,6 +74,24 @@ class MEDIA_EXPORT GpuVideoDecoder
kError
};
+ // A shared memory segment and its allocated size.
+ struct SHMBuffer {
+ SHMBuffer(base::SharedMemory* m, size_t s);
+ ~SHMBuffer();
+ base::SharedMemory* shm;
+ size_t size;
+ };
+
+ // A SHMBuffer and the DecoderBuffer its data came from.
+ struct BufferPair {
+ BufferPair(SHMBuffer* s, const scoped_refptr<DecoderBuffer>& b);
+ ~BufferPair();
+ SHMBuffer* shm_buffer;
+ scoped_refptr<DecoderBuffer> buffer;
+ };
+
+ typedef std::map<int32, PictureBuffer> PictureBufferMap;
+
// Return true if more decode work can be piled on to the VDA.
bool CanMoreDecodeWorkBeDone();
@@ -95,14 +113,6 @@ class MEDIA_EXPORT GpuVideoDecoder
void DestroyVDA();
- // A shared memory segment and its allocated size.
- struct SHMBuffer {
- SHMBuffer(base::SharedMemory* m, size_t s);
- ~SHMBuffer();
- base::SharedMemory* shm;
- size_t size;
- };
-
// Request a shared-memory segment of at least |min_size| bytes. Will
// allocate as necessary. Caller does not own returned pointer.
SHMBuffer* GetSHM(size_t min_size);
@@ -110,7 +120,8 @@ class MEDIA_EXPORT GpuVideoDecoder
// Return a shared-memory segment to the available pool.
void PutSHM(SHMBuffer* shm_buffer);
- void DestroyTextures();
+ // Destroy all PictureBuffers in |buffers|, and delete their textures.
+ void DestroyPictureBuffers(PictureBufferMap* buffers);
bool needs_bitstream_conversion_;
@@ -141,16 +152,9 @@ class MEDIA_EXPORT GpuVideoDecoder
scoped_refptr<MediaLog> media_log_;
- // Book-keeping variables.
- struct BufferPair {
- BufferPair(SHMBuffer* s, const scoped_refptr<DecoderBuffer>& b);
- ~BufferPair();
- SHMBuffer* shm_buffer;
- scoped_refptr<DecoderBuffer> buffer;
- };
std::map<int32, BufferPair> bitstream_buffers_in_decoder_;
- std::map<int32, PictureBuffer> assigned_picture_buffers_;
- std::map<int32, PictureBuffer> dismissed_picture_buffers_;
+ PictureBufferMap assigned_picture_buffers_;
+ PictureBufferMap dismissed_picture_buffers_;
// PictureBuffers given to us by VDA via PictureReady, which we sent forward
// as VideoFrames to be rendered via decode_cb_, and which will be returned
// to us via ReusePictureBuffer.
diff --git a/chromium/media/filters/mock_gpu_video_accelerator_factories.h b/chromium/media/filters/mock_gpu_video_accelerator_factories.h
index 8aa432d8cfc..1dfac3da8b4 100644
--- a/chromium/media/filters/mock_gpu_video_accelerator_factories.h
+++ b/chromium/media/filters/mock_gpu_video_accelerator_factories.h
@@ -42,9 +42,8 @@ class MockGpuVideoAcceleratorFactories : public GpuVideoAcceleratorFactories {
uint32 texture_target));
MOCK_METHOD1(DeleteTexture, void(uint32 texture_id));
MOCK_METHOD1(WaitSyncPoint, void(uint32 sync_point));
- MOCK_METHOD4(ReadPixels,
+ MOCK_METHOD3(ReadPixels,
void(uint32 texture_id,
- uint32 texture_target,
const gfx::Size& size,
const SkBitmap& pixels));
MOCK_METHOD1(CreateSharedMemory, base::SharedMemory*(size_t size));
diff --git a/chromium/media/filters/opus_audio_decoder.cc b/chromium/media/filters/opus_audio_decoder.cc
index b3e903b2313..c1de6df2158 100644
--- a/chromium/media/filters/opus_audio_decoder.cc
+++ b/chromium/media/filters/opus_audio_decoder.cc
@@ -25,20 +25,15 @@
namespace media {
static uint16 ReadLE16(const uint8* data, size_t data_size, int read_offset) {
- DCHECK(data);
uint16 value = 0;
DCHECK_LE(read_offset + sizeof(value), data_size);
memcpy(&value, data + read_offset, sizeof(value));
return base::ByteSwapToLE16(value);
}
-// Returns true if the decode result was end of stream.
-static inline bool IsEndOfStream(int decoded_size,
- const scoped_refptr<DecoderBuffer>& input) {
- // Two conditions to meet to declare end of stream for this decoder:
- // 1. Opus didn't output anything.
- // 2. An end of stream buffer is received.
- return decoded_size == 0 && input->end_of_stream();
+static int TimeDeltaToAudioFrames(base::TimeDelta time_delta,
+ int frame_rate) {
+ return std::ceil(time_delta.InSecondsF() * frame_rate);
}
// The Opus specification is part of IETF RFC 6716:
@@ -50,15 +45,8 @@ static inline bool IsEndOfStream(int decoded_size,
// http://www.xiph.org/vorbis/doc/Vorbis_I_spec.html
static const int kMaxVorbisChannels = 8;
-// Opus allows for decode of S16 or float samples. OpusAudioDecoder always uses
-// S16 samples.
-static const int kBitsPerChannel = 16;
-static const int kBytesPerChannel = kBitsPerChannel / 8;
-
// Maximum packet size used in Xiph's opusdec and FFmpeg's libopusdec.
-static const int kMaxOpusOutputPacketSizeSamples = 960 * 6 * kMaxVorbisChannels;
-static const int kMaxOpusOutputPacketSizeBytes =
- kMaxOpusOutputPacketSizeSamples * kBytesPerChannel;
+static const int kMaxOpusOutputPacketSizeSamples = 960 * 6;
static void RemapOpusChannelLayout(const uint8* opus_mapping,
int num_channels,
@@ -128,7 +116,7 @@ static void RemapOpusChannelLayout(const uint8* opus_mapping,
channel_layout[channel] = opus_mapping[vorbis_layout_offset[channel]];
}
-// Opus Header contents:
+// Opus Extra Data contents:
// - "OpusHead" (64 bits)
// - version number (8 bits)
// - Channels C (8 bits)
@@ -152,94 +140,116 @@ static void RemapOpusChannelLayout(const uint8* opus_mapping,
// - stream = byte-M
// Default audio output channel layout. Used to initialize |stream_map| in
-// OpusHeader, and passed to opus_multistream_decoder_create() when the header
-// does not contain mapping information. The values are valid only for mono and
-// stereo output: Opus streams with more than 2 channels require a stream map.
+// OpusExtraData, and passed to opus_multistream_decoder_create() when the
+// extra data does not contain mapping information. The values are valid only
+// for mono and stereo output: Opus streams with more than 2 channels require a
+// stream map.
static const int kMaxChannelsWithDefaultLayout = 2;
static const uint8 kDefaultOpusChannelLayout[kMaxChannelsWithDefaultLayout] = {
0, 1 };
-// Size of the Opus header excluding optional mapping information.
-static const int kOpusHeaderSize = 19;
+// Size of the Opus extra data excluding optional mapping information.
+static const int kOpusExtraDataSize = 19;
+
+// Offset to the channel count byte in the Opus extra data.
+static const int kOpusExtraDataChannelsOffset = 9;
-// Offset to the channel count byte in the Opus header.
-static const int kOpusHeaderChannelsOffset = 9;
+// Offset to the pre-skip value in the Opus extra data.
+static const int kOpusExtraDataSkipSamplesOffset = 10;
-// Offset to the pre-skip value in the Opus header.
-static const int kOpusHeaderSkipSamplesOffset = 10;
+// Offset to the gain value in the Opus extra data.
+static const int kOpusExtraDataGainOffset = 16;
-// Offset to the channel mapping byte in the Opus header.
-static const int kOpusHeaderChannelMappingOffset = 18;
+// Offset to the channel mapping byte in the Opus extra data.
+static const int kOpusExtraDataChannelMappingOffset = 18;
-// Header contains a stream map. The mapping values are in extra data beyond
-// the always present |kOpusHeaderSize| bytes of data. The mapping data
+// Extra Data contains a stream map. The mapping values are in extra data beyond
+// the always present |kOpusExtraDataSize| bytes of data. The mapping data
// contains stream count, coupling information, and per channel mapping values:
// - Byte 0: Number of streams.
// - Byte 1: Number coupled.
-// - Byte 2: Starting at byte 2 are |header->channels| uint8 mapping values.
-static const int kOpusHeaderNumStreamsOffset = kOpusHeaderSize;
-static const int kOpusHeaderNumCoupledOffset = kOpusHeaderNumStreamsOffset + 1;
-static const int kOpusHeaderStreamMapOffset = kOpusHeaderNumStreamsOffset + 2;
-
-struct OpusHeader {
- OpusHeader()
+// - Byte 2: Starting at byte 2 are |extra_data->channels| uint8 mapping
+// values.
+static const int kOpusExtraDataNumStreamsOffset = kOpusExtraDataSize;
+static const int kOpusExtraDataNumCoupledOffset =
+ kOpusExtraDataNumStreamsOffset + 1;
+static const int kOpusExtraDataStreamMapOffset =
+ kOpusExtraDataNumStreamsOffset + 2;
+
+struct OpusExtraData {
+ OpusExtraData()
: channels(0),
skip_samples(0),
channel_mapping(0),
num_streams(0),
- num_coupled(0) {
+ num_coupled(0),
+ gain_db(0) {
memcpy(stream_map,
kDefaultOpusChannelLayout,
kMaxChannelsWithDefaultLayout);
}
int channels;
- int skip_samples;
+ uint16 skip_samples;
int channel_mapping;
int num_streams;
int num_coupled;
+ int16 gain_db;
uint8 stream_map[kMaxVorbisChannels];
};
-// Returns true when able to successfully parse and store Opus header data in
-// data parsed in |header|. Based on opus header parsing code in libopusdec
-// from FFmpeg, and opus_header from Xiph's opus-tools project.
-static void ParseOpusHeader(const uint8* data, int data_size,
- const AudioDecoderConfig& config,
- OpusHeader* header) {
- CHECK_GE(data_size, kOpusHeaderSize);
+// Returns true when able to successfully parse and store Opus extra data in
+// |extra_data|. Based on opus header parsing code in libopusdec from FFmpeg,
+// and opus_header from Xiph's opus-tools project.
+static bool ParseOpusExtraData(const uint8* data, int data_size,
+ const AudioDecoderConfig& config,
+ OpusExtraData* extra_data) {
+ if (data_size < kOpusExtraDataSize) {
+ DLOG(ERROR) << "Extra data size is too small:" << data_size;
+ return false;
+ }
- header->channels = *(data + kOpusHeaderChannelsOffset);
+ extra_data->channels = *(data + kOpusExtraDataChannelsOffset);
- CHECK(header->channels > 0 && header->channels <= kMaxVorbisChannels)
- << "invalid channel count in header: " << header->channels;
+ if (extra_data->channels <= 0 || extra_data->channels > kMaxVorbisChannels) {
+ DLOG(ERROR) << "invalid channel count in extra data: "
+ << extra_data->channels;
+ return false;
+ }
- header->skip_samples =
- ReadLE16(data, data_size, kOpusHeaderSkipSamplesOffset);
+ extra_data->skip_samples =
+ ReadLE16(data, data_size, kOpusExtraDataSkipSamplesOffset);
+ extra_data->gain_db = static_cast<int16>(
+ ReadLE16(data, data_size, kOpusExtraDataGainOffset));
- header->channel_mapping = *(data + kOpusHeaderChannelMappingOffset);
+ extra_data->channel_mapping = *(data + kOpusExtraDataChannelMappingOffset);
- if (!header->channel_mapping) {
- CHECK_LE(header->channels, kMaxChannelsWithDefaultLayout)
- << "Invalid header, missing stream map.";
+ if (!extra_data->channel_mapping) {
+ if (extra_data->channels > kMaxChannelsWithDefaultLayout) {
+ DLOG(ERROR) << "Invalid extra data, missing stream map.";
+ return false;
+ }
- header->num_streams = 1;
- header->num_coupled =
+ extra_data->num_streams = 1;
+ extra_data->num_coupled =
(ChannelLayoutToChannelCount(config.channel_layout()) > 1) ? 1 : 0;
- return;
+ return true;
}
- CHECK_GE(data_size, kOpusHeaderStreamMapOffset + header->channels)
- << "Invalid stream map; insufficient data for current channel count: "
- << header->channels;
+ if (data_size < kOpusExtraDataStreamMapOffset + extra_data->channels) {
+ DLOG(ERROR) << "Invalid stream map; insufficient data for current channel "
+ << "count: " << extra_data->channels;
+ return false;
+ }
- header->num_streams = *(data + kOpusHeaderNumStreamsOffset);
- header->num_coupled = *(data + kOpusHeaderNumCoupledOffset);
+ extra_data->num_streams = *(data + kOpusExtraDataNumStreamsOffset);
+ extra_data->num_coupled = *(data + kOpusExtraDataNumCoupledOffset);
- if (header->num_streams + header->num_coupled != header->channels)
- LOG(WARNING) << "Inconsistent channel mapping.";
+ if (extra_data->num_streams + extra_data->num_coupled != extra_data->channels)
+ DVLOG(1) << "Inconsistent channel mapping.";
- for (int i = 0; i < header->channels; ++i)
- header->stream_map[i] = *(data + kOpusHeaderStreamMapOffset + i);
+ for (int i = 0; i < extra_data->channels; ++i)
+ extra_data->stream_map[i] = *(data + kOpusExtraDataStreamMapOffset + i);
+ return true;
}
OpusAudioDecoder::OpusAudioDecoder(
@@ -248,11 +258,14 @@ OpusAudioDecoder::OpusAudioDecoder(
weak_factory_(this),
demuxer_stream_(NULL),
opus_decoder_(NULL),
- bits_per_channel_(0),
channel_layout_(CHANNEL_LAYOUT_NONE),
samples_per_second_(0),
+ sample_format_(kSampleFormatF32),
+ bits_per_channel_(SampleFormatToBytesPerChannel(sample_format_) * 8),
last_input_timestamp_(kNoTimestamp()),
- skip_samples_(0) {
+ frames_to_discard_(0),
+ frame_delay_at_start_(0),
+ start_input_timestamp_(kNoTimestamp()) {
}
void OpusAudioDecoder::Initialize(
@@ -265,7 +278,7 @@ void OpusAudioDecoder::Initialize(
if (demuxer_stream_) {
// TODO(scherkus): initialization currently happens more than once in
// PipelineIntegrationTest.BasicPlayback.
- LOG(ERROR) << "Initialize has already been called.";
+ DLOG(ERROR) << "Initialize has already been called.";
CHECK(false);
}
@@ -366,7 +379,7 @@ void OpusAudioDecoder::BufferReady(
// occurs with some damaged files.
if (input->timestamp() == kNoTimestamp() &&
output_timestamp_helper_->base_timestamp() == kNoTimestamp()) {
- DVLOG(1) << "Received a buffer without timestamps!";
+ DLOG(ERROR) << "Received a buffer without timestamps!";
base::ResetAndReturn(&read_cb_).Run(kDecodeError, NULL);
return;
}
@@ -375,13 +388,21 @@ void OpusAudioDecoder::BufferReady(
input->timestamp() != kNoTimestamp() &&
input->timestamp() < last_input_timestamp_) {
base::TimeDelta diff = input->timestamp() - last_input_timestamp_;
- DVLOG(1) << "Input timestamps are not monotonically increasing! "
- << " ts " << input->timestamp().InMicroseconds() << " us"
- << " diff " << diff.InMicroseconds() << " us";
+ DLOG(ERROR) << "Input timestamps are not monotonically increasing! "
+ << " ts " << input->timestamp().InMicroseconds() << " us"
+ << " diff " << diff.InMicroseconds() << " us";
base::ResetAndReturn(&read_cb_).Run(kDecodeError, NULL);
return;
}
+ // Apply the necessary codec delay.
+ if (start_input_timestamp_ == kNoTimestamp())
+ start_input_timestamp_ = input->timestamp();
+ if (last_input_timestamp_ == kNoTimestamp() &&
+ input->timestamp() == start_input_timestamp_) {
+ frames_to_discard_ = frame_delay_at_start_;
+ }
+
last_input_timestamp_ = input->timestamp();
scoped_refptr<AudioBuffer> output_buffer;
@@ -405,7 +426,7 @@ bool OpusAudioDecoder::ConfigureDecoder() {
const AudioDecoderConfig& config = demuxer_stream_->audio_decoder_config();
if (config.codec() != kCodecOpus) {
- DLOG(ERROR) << "codec must be kCodecOpus.";
+ DVLOG(1) << "Codec must be kCodecOpus.";
return false;
}
@@ -421,69 +442,55 @@ bool OpusAudioDecoder::ConfigureDecoder() {
return false;
}
- if (config.bits_per_channel() != kBitsPerChannel) {
- DLOG(ERROR) << "16 bit samples required.";
- return false;
- }
-
if (config.is_encrypted()) {
DLOG(ERROR) << "Encrypted audio stream not supported.";
return false;
}
if (opus_decoder_ &&
- (bits_per_channel_ != config.bits_per_channel() ||
- channel_layout_ != config.channel_layout() ||
+ (channel_layout_ != config.channel_layout() ||
samples_per_second_ != config.samples_per_second())) {
- DVLOG(1) << "Unsupported config change :";
- DVLOG(1) << "\tbits_per_channel : " << bits_per_channel_
- << " -> " << config.bits_per_channel();
- DVLOG(1) << "\tchannel_layout : " << channel_layout_
- << " -> " << config.channel_layout();
- DVLOG(1) << "\tsample_rate : " << samples_per_second_
- << " -> " << config.samples_per_second();
+ DLOG(ERROR) << "Unsupported config change -"
+ << ", channel_layout: " << channel_layout_
+ << " -> " << config.channel_layout()
+ << ", sample_rate: " << samples_per_second_
+ << " -> " << config.samples_per_second();
return false;
}
// Clean up existing decoder if necessary.
CloseDecoder();
- // Allocate the output buffer if necessary.
- if (!output_buffer_)
- output_buffer_.reset(new int16[kMaxOpusOutputPacketSizeSamples]);
+ // Parse the Opus Extra Data.
+ OpusExtraData opus_extra_data;
+ if (!ParseOpusExtraData(config.extra_data(), config.extra_data_size(),
+ config,
+ &opus_extra_data))
+ return false;
- // Parse the Opus header.
- OpusHeader opus_header;
- ParseOpusHeader(config.extra_data(), config.extra_data_size(),
- config,
- &opus_header);
+ // Convert from seconds to samples.
+ timestamp_offset_ = config.codec_delay();
+ frame_delay_at_start_ = TimeDeltaToAudioFrames(config.codec_delay(),
+ config.samples_per_second());
+ if (timestamp_offset_ <= base::TimeDelta() || frame_delay_at_start_ < 0) {
+ DLOG(ERROR) << "Invalid file. Incorrect value for codec delay: "
+ << config.codec_delay().InMicroseconds();
+ return false;
+ }
- if (!config.codec_delay().InMicroseconds()) {
- // TODO(vigneshv): Replace this with return false once ffmpeg demuxer code
- // starts populating the config correctly.
- skip_samples_ = opus_header.skip_samples;
- } else {
- // Convert from seconds to samples.
- skip_samples_ = std::ceil(config.codec_delay().InMicroseconds() *
- config.samples_per_second() / 1000000.0);
- if (skip_samples_ < 0) {
- DVLOG(1) << "Invalid file. Incorrect value for codec delay.";
- return false;
- }
- if (skip_samples_ != opus_header.skip_samples) {
- DVLOG(1) << "Invalid file. Codec Delay in container does not match the "
- << "value in Opus header.";
- return false;
- }
+ if (frame_delay_at_start_ != opus_extra_data.skip_samples) {
+ DLOG(ERROR) << "Invalid file. Codec Delay in container does not match the "
+ << "value in Opus Extra Data.";
+ return false;
}
- uint8 channel_mapping[kMaxVorbisChannels];
+ uint8 channel_mapping[kMaxVorbisChannels] = {0};
memcpy(&channel_mapping,
kDefaultOpusChannelLayout,
kMaxChannelsWithDefaultLayout);
if (channel_count > kMaxChannelsWithDefaultLayout) {
- RemapOpusChannelLayout(opus_header.stream_map,
+ RemapOpusChannelLayout(opus_extra_data.stream_map,
channel_count,
channel_mapping);
}
@@ -492,21 +499,29 @@ bool OpusAudioDecoder::ConfigureDecoder() {
int status = OPUS_INVALID_STATE;
opus_decoder_ = opus_multistream_decoder_create(config.samples_per_second(),
channel_count,
- opus_header.num_streams,
- opus_header.num_coupled,
+ opus_extra_data.num_streams,
+ opus_extra_data.num_coupled,
channel_mapping,
&status);
if (!opus_decoder_ || status != OPUS_OK) {
- LOG(ERROR) << "opus_multistream_decoder_create failed status="
- << opus_strerror(status);
+ DLOG(ERROR) << "opus_multistream_decoder_create failed status="
+ << opus_strerror(status);
+ return false;
+ }
+
+ status = opus_multistream_decoder_ctl(
+ opus_decoder_, OPUS_SET_GAIN(opus_extra_data.gain_db));
+ if (status != OPUS_OK) {
+ DLOG(ERROR) << "Failed to set OPUS header gain; status="
+ << opus_strerror(status);
return false;
}
- bits_per_channel_ = config.bits_per_channel();
channel_layout_ = config.channel_layout();
samples_per_second_ = config.samples_per_second();
output_timestamp_helper_.reset(
new AudioTimestampHelper(config.samples_per_second()));
+ start_input_timestamp_ = kNoTimestamp();
return true;
}
@@ -520,75 +535,93 @@ void OpusAudioDecoder::CloseDecoder() {
void OpusAudioDecoder::ResetTimestampState() {
output_timestamp_helper_->SetBaseTimestamp(kNoTimestamp());
last_input_timestamp_ = kNoTimestamp();
- skip_samples_ = 0;
+ frames_to_discard_ = TimeDeltaToAudioFrames(
+ demuxer_stream_->audio_decoder_config().seek_preroll(),
+ samples_per_second_);
}
bool OpusAudioDecoder::Decode(const scoped_refptr<DecoderBuffer>& input,
scoped_refptr<AudioBuffer>* output_buffer) {
- int samples_decoded = opus_multistream_decode(opus_decoder_,
- input->data(),
- input->data_size(),
- &output_buffer_[0],
- kMaxOpusOutputPacketSizeSamples,
- 0);
- if (samples_decoded < 0) {
- LOG(ERROR) << "opus_multistream_decode failed for"
- << " timestamp: " << input->timestamp().InMicroseconds()
- << " us, duration: " << input->duration().InMicroseconds()
- << " us, packet size: " << input->data_size() << " bytes with"
- << " status: " << opus_strerror(samples_decoded);
+ // Allocate a buffer for the output samples.
+ *output_buffer = AudioBuffer::CreateBuffer(
+ sample_format_,
+ ChannelLayoutToChannelCount(channel_layout_),
+ kMaxOpusOutputPacketSizeSamples);
+ const int buffer_size =
+ output_buffer->get()->channel_count() *
+ output_buffer->get()->frame_count() *
+ SampleFormatToBytesPerChannel(sample_format_);
+
+ float* float_output_buffer = reinterpret_cast<float*>(
+ output_buffer->get()->channel_data()[0]);
+ const int frames_decoded =
+ opus_multistream_decode_float(opus_decoder_,
+ input->data(),
+ input->data_size(),
+ float_output_buffer,
+ buffer_size,
+ 0);
+
+ if (frames_decoded < 0) {
+ DLOG(ERROR) << "opus_multistream_decode failed for"
+ << " timestamp: " << input->timestamp().InMicroseconds()
+ << " us, duration: " << input->duration().InMicroseconds()
+ << " us, packet size: " << input->data_size() << " bytes with"
+ << " status: " << opus_strerror(frames_decoded);
return false;
}
- uint8* decoded_audio_data = reinterpret_cast<uint8*>(&output_buffer_[0]);
- int decoded_audio_size = samples_decoded *
- demuxer_stream_->audio_decoder_config().bytes_per_frame();
- DCHECK_LE(decoded_audio_size, kMaxOpusOutputPacketSizeBytes);
-
if (output_timestamp_helper_->base_timestamp() == kNoTimestamp() &&
!input->end_of_stream()) {
DCHECK(input->timestamp() != kNoTimestamp());
output_timestamp_helper_->SetBaseTimestamp(input->timestamp());
}
- if (decoded_audio_size > 0) {
- // Copy the audio samples into an output buffer.
- uint8* data[] = { decoded_audio_data };
- *output_buffer = AudioBuffer::CopyFrom(
- kSampleFormatS16,
- ChannelLayoutToChannelCount(channel_layout_),
- samples_decoded,
- data,
- output_timestamp_helper_->GetTimestamp(),
- output_timestamp_helper_->GetFrameDuration(samples_decoded));
- output_timestamp_helper_->AddFrames(samples_decoded);
- if (skip_samples_ > 0) {
- int dropped_size = std::min(samples_decoded, skip_samples_);
- output_buffer->get()->TrimStart(dropped_size);
- skip_samples_ -= dropped_size;
- samples_decoded -= dropped_size;
+ // Trim off any extraneous allocation.
+ DCHECK_LE(frames_decoded, output_buffer->get()->frame_count());
+ const int trim_frames = output_buffer->get()->frame_count() - frames_decoded;
+ if (trim_frames > 0)
+ output_buffer->get()->TrimEnd(trim_frames);
+
+ // Handle frame discard and trimming.
+ int frames_to_output = frames_decoded;
+ if (frames_decoded > frames_to_discard_) {
+ if (frames_to_discard_ > 0) {
+ output_buffer->get()->TrimStart(frames_to_discard_);
+ frames_to_output -= frames_to_discard_;
+ frames_to_discard_ = 0;
}
if (input->discard_padding().InMicroseconds() > 0) {
- int discard_padding = std::ceil(
- input->discard_padding().InMicroseconds() *
- samples_per_second_ / 1000000.0);
- if (discard_padding < 0 || discard_padding > samples_decoded) {
+ int discard_padding = TimeDeltaToAudioFrames(input->discard_padding(),
+ samples_per_second_);
+ if (discard_padding < 0 || discard_padding > frames_to_output) {
DVLOG(1) << "Invalid file. Incorrect discard padding value.";
return false;
}
- output_buffer->get()->TrimEnd(std::min(samples_decoded, discard_padding));
- samples_decoded -= discard_padding;
+ output_buffer->get()->TrimEnd(discard_padding);
+ frames_to_output -= discard_padding;
}
+ } else {
+ frames_to_discard_ -= frames_to_output;
+ frames_to_output = 0;
}
- decoded_audio_size =
- samples_decoded *
- demuxer_stream_->audio_decoder_config().bytes_per_frame();
// Decoding finished successfully, update statistics.
PipelineStatistics statistics;
- statistics.audio_bytes_decoded = decoded_audio_size;
+ statistics.audio_bytes_decoded = input->data_size();
statistics_cb_.Run(statistics);
+ // Assign timestamp and duration to the buffer.
+ output_buffer->get()->set_timestamp(
+ output_timestamp_helper_->GetTimestamp() - timestamp_offset_);
+ output_buffer->get()->set_duration(
+ output_timestamp_helper_->GetFrameDuration(frames_to_output));
+ output_timestamp_helper_->AddFrames(frames_decoded);
+
+ // Discard the buffer to indicate we need more data.
+ if (!frames_to_output)
+ *output_buffer = NULL;
+
return true;
}
diff --git a/chromium/media/filters/opus_audio_decoder.h b/chromium/media/filters/opus_audio_decoder.h
index 77e84344f0c..982458b1afa 100644
--- a/chromium/media/filters/opus_audio_decoder.h
+++ b/chromium/media/filters/opus_audio_decoder.h
@@ -10,6 +10,7 @@
#include "base/time/time.h"
#include "media/base/audio_decoder.h"
#include "media/base/demuxer_stream.h"
+#include "media/base/sample_format.h"
struct OpusMSDecoder;
@@ -62,9 +63,10 @@ class MEDIA_EXPORT OpusAudioDecoder : public AudioDecoder {
OpusMSDecoder* opus_decoder_;
// Decoded audio format.
- int bits_per_channel_;
ChannelLayout channel_layout_;
int samples_per_second_;
+ const SampleFormat sample_format_;
+ const int bits_per_channel_;
// Used for computing output timestamps.
scoped_ptr<AudioTimestampHelper> output_timestamp_helper_;
@@ -72,10 +74,22 @@ class MEDIA_EXPORT OpusAudioDecoder : public AudioDecoder {
ReadCB read_cb_;
- int skip_samples_;
-
- // Buffer for output from libopus.
- scoped_ptr<int16[]> output_buffer_;
+ // Number of frames to be discarded from the start of the packet. This value
+ // is respected for all packets except for the first one in the stream. For
+ // the first packet in the stream, |frame_delay_at_start_| is used. This is
+ // usually set to the SeekPreRoll value from the container whenever a seek
+ // happens.
+ int frames_to_discard_;
+
+ // Number of frames to be discarded at the start of the stream. This value
+ // is typically the CodecDelay value from the container. This value should
+ // only be applied when input timestamp is |start_input_timestamp_|.
+ int frame_delay_at_start_;
+ base::TimeDelta start_input_timestamp_;
+
+ // Timestamp to be subtracted from all the frames. This is typically computed
+ // from the CodecDelay value in the container.
+ base::TimeDelta timestamp_offset_;
DISALLOW_IMPLICIT_CONSTRUCTORS(OpusAudioDecoder);
};
diff --git a/chromium/media/filters/pipeline_integration_perftest.cc b/chromium/media/filters/pipeline_integration_perftest.cc
new file mode 100644
index 00000000000..aea9363c94a
--- /dev/null
+++ b/chromium/media/filters/pipeline_integration_perftest.cc
@@ -0,0 +1,92 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/test_data_util.h"
+#include "media/filters/pipeline_integration_test_base.h"
+#include "testing/perf/perf_test.h"
+
+namespace media {
+
+static const int kBenchmarkIterationsAudio = 200;
+static const int kBenchmarkIterationsVideo = 20;
+
+static void RunPlaybackBenchmark(const std::string& filename,
+ const std::string& name,
+ int iterations,
+ bool audio_only) {
+ double time_seconds = 0.0;
+
+ for (int i = 0; i < iterations; ++i) {
+ PipelineIntegrationTestBase pipeline;
+
+ ASSERT_TRUE(pipeline.Start(GetTestDataFilePath(filename),
+ PIPELINE_OK,
+ PipelineIntegrationTestBase::kClockless));
+
+ base::TimeTicks start = base::TimeTicks::HighResNow();
+ pipeline.Play();
+
+ ASSERT_TRUE(pipeline.WaitUntilOnEnded());
+
+ // Call Stop() to ensure that the rendering is complete.
+ pipeline.Stop();
+
+ if (audio_only) {
+ time_seconds += pipeline.GetAudioTime().InSecondsF();
+ } else {
+ time_seconds += (base::TimeTicks::HighResNow() - start).InSecondsF();
+ }
+ }
+
+ perf_test::PrintResult(name,
+ "",
+ filename,
+ iterations / time_seconds,
+ "runs/s",
+ true);
+}
+
+static void RunVideoPlaybackBenchmark(const std::string& filename,
+ const std::string name) {
+ RunPlaybackBenchmark(filename, name, kBenchmarkIterationsVideo, false);
+}
+
+static void RunAudioPlaybackBenchmark(const std::string& filename,
+ const std::string& name) {
+ RunPlaybackBenchmark(filename, name, kBenchmarkIterationsAudio, true);
+}
+
+TEST(PipelineIntegrationPerfTest, AudioPlaybackBenchmark) {
+ RunAudioPlaybackBenchmark("sfx_f32le.wav", "clockless_playback");
+ RunAudioPlaybackBenchmark("sfx_s24le.wav", "clockless_playback");
+ RunAudioPlaybackBenchmark("sfx_s16le.wav", "clockless_playback");
+ RunAudioPlaybackBenchmark("sfx_u8.wav", "clockless_playback");
+#if defined(USE_PROPRIETARY_CODECS)
+ RunAudioPlaybackBenchmark("sfx.mp3", "clockless_playback");
+#endif
+}
+
+TEST(PipelineIntegrationPerfTest, VP8PlaybackBenchmark) {
+ RunVideoPlaybackBenchmark("bear-640x360.webm",
+ "clockless_video_playback_vp8");
+ RunVideoPlaybackBenchmark("bear-320x240.webm",
+ "clockless_video_playback_vp8");
+}
+
+TEST(PipelineIntegrationPerfTest, VP9PlaybackBenchmark) {
+ RunVideoPlaybackBenchmark("bear-vp9.webm", "clockless_video_playback_vp9");
+}
+
+TEST(PipelineIntegrationPerfTest, TheoraPlaybackBenchmark) {
+ RunVideoPlaybackBenchmark("bear.ogv", "clockless_video_playback_theora");
+}
+
+#if defined(USE_PROPRIETARY_CODECS)
+TEST(PipelineIntegrationPerfTest, MP4PlaybackBenchmark) {
+ RunVideoPlaybackBenchmark("bear-1280x720.mp4",
+ "clockless_video_playback_mp4");
+}
+#endif
+
+} // namespace media
diff --git a/chromium/media/filters/pipeline_integration_test.cc b/chromium/media/filters/pipeline_integration_test.cc
index 0ce2fd12446..bdf33f22418 100644
--- a/chromium/media/filters/pipeline_integration_test.cc
+++ b/chromium/media/filters/pipeline_integration_test.cc
@@ -14,6 +14,7 @@
#include "media/base/media_switches.h"
#include "media/base/test_data_util.h"
#include "media/cdm/aes_decryptor.h"
+#include "media/cdm/json_web_key.h"
#include "media/filters/chunk_demuxer.h"
using testing::AnyNumber;
@@ -21,51 +22,53 @@ using testing::AtMost;
namespace media {
-static const char kSourceId[] = "SourceId";
-static const char kClearKeySystem[] = "org.w3.clearkey";
-static const uint8 kInitData[] = { 0x69, 0x6e, 0x69, 0x74 };
-
-static const char kWebM[] = "video/webm; codecs=\"vp8,vorbis\"";
-static const char kWebMVP9[] = "video/webm; codecs=\"vp9\"";
-static const char kAudioOnlyWebM[] = "video/webm; codecs=\"vorbis\"";
-static const char kOpusAudioOnlyWebM[] = "video/webm; codecs=\"opus\"";
-static const char kVideoOnlyWebM[] = "video/webm; codecs=\"vp8\"";
-static const char kMP4[] = "video/mp4; codecs=\"avc1.4D4041,mp4a.40.2\"";
-static const char kMP4Video[] = "video/mp4; codecs=\"avc1.4D4041\"";
-static const char kMP4Audio[] = "audio/mp4; codecs=\"mp4a.40.2\"";
-static const char kMP4AudioType[] = "audio/mp4";
-static const char kMP4VideoType[] = "video/mp4";
-static const char kMP3[] = "audio/mpeg";
+const char kSourceId[] = "SourceId";
+const uint8 kInitData[] = { 0x69, 0x6e, 0x69, 0x74 };
+
+const char kWebM[] = "video/webm; codecs=\"vp8,vorbis\"";
+const char kWebMVP9[] = "video/webm; codecs=\"vp9\"";
+const char kAudioOnlyWebM[] = "video/webm; codecs=\"vorbis\"";
+const char kOpusAudioOnlyWebM[] = "video/webm; codecs=\"opus\"";
+const char kVideoOnlyWebM[] = "video/webm; codecs=\"vp8\"";
+const char kMP4VideoType[] = "video/mp4";
+const char kMP4AudioType[] = "audio/mp4";
+#if defined(USE_PROPRIETARY_CODECS)
+const char kMP4[] = "video/mp4; codecs=\"avc1.4D4041,mp4a.40.2\"";
+const char kMP4Video[] = "video/mp4; codecs=\"avc1.4D4041\"";
+const char kMP4VideoAVC3[] = "video/mp4; codecs=\"avc3.64001f\"";
+const char kMP4Audio[] = "audio/mp4; codecs=\"mp4a.40.2\"";
+const char kMP3[] = "audio/mpeg";
+#endif // defined(USE_PROPRIETARY_CODECS)
// Key used to encrypt test files.
-static const uint8 kSecretKey[] = {
+const uint8 kSecretKey[] = {
0xeb, 0xdd, 0x62, 0xf1, 0x68, 0x14, 0xd2, 0x7b,
0x68, 0xef, 0x12, 0x2a, 0xfc, 0xe4, 0xae, 0x3c
};
// The key ID for all encrypted files.
-static const uint8 kKeyId[] = {
+const uint8 kKeyId[] = {
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
0x38, 0x39, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35
};
-static const int kAppendWholeFile = -1;
+const int kAppendWholeFile = -1;
// Constants for the Media Source config change tests.
-static const int kAppendTimeSec = 1;
-static const int kAppendTimeMs = kAppendTimeSec * 1000;
-static const int k320WebMFileDurationMs = 2737;
-static const int k640WebMFileDurationMs = 2763;
-static const int k640IsoFileDurationMs = 2737;
-static const int k640IsoCencFileDurationMs = 2736;
-static const int k1280IsoFileDurationMs = 2736;
-static const int kOpusEndTrimmingWebMFileDurationMs = 2771;
-static const uint32 kOpusEndTrimmingWebMFileAudioBytes = 528676;
-static const int kVP9WebMFileDurationMs = 2735;
-static const int kVP8AWebMFileDurationMs = 2700;
-
-// Command line switch for runtime adjustment of audio file to be benchmarked.
-static const char kBenchmarkAudioFile[] = "benchmark-audio-file";
+const int kAppendTimeSec = 1;
+const int kAppendTimeMs = kAppendTimeSec * 1000;
+const int k320WebMFileDurationMs = 2737;
+const int k640WebMFileDurationMs = 2763;
+const int kOpusEndTrimmingWebMFileDurationMs = 2771;
+const int kVP9WebMFileDurationMs = 2735;
+const int kVP8AWebMFileDurationMs = 2700;
+
+#if defined(USE_PROPRIETARY_CODECS)
+const int k640IsoFileDurationMs = 2737;
+const int k640IsoCencFileDurationMs = 2736;
+const int k1280IsoFileDurationMs = 2736;
+const int k1280IsoAVC3FileDurationMs = 2735;
+#endif // defined(USE_PROPRIETARY_CODECS)
// Note: Tests using this class only exercise the DecryptingDemuxerStream path.
// They do not exercise the Decrypting{Audio|Video}Decoder path.
@@ -76,60 +79,74 @@ class FakeEncryptedMedia {
public:
virtual ~AppBase() {}
- virtual void KeyAdded(const std::string& session_id) = 0;
+ virtual void OnSessionCreated(uint32 session_id,
+ const std::string& web_session_id) = 0;
+
+ virtual void OnSessionMessage(uint32 session_id,
+ const std::vector<uint8>& message,
+ const std::string& destination_url) = 0;
+
+ virtual void OnSessionReady(uint32 session_id) = 0;
+
+ virtual void OnSessionClosed(uint32 session_id) = 0;
// Errors are not expected unless overridden.
- virtual void KeyError(const std::string& session_id,
- MediaKeys::KeyError error_code,
- int system_code) {
+ virtual void OnSessionError(uint32 session_id,
+ MediaKeys::KeyError error_code,
+ int system_code) {
FAIL() << "Unexpected Key Error";
}
- virtual void KeyMessage(const std::string& session_id,
- const std::vector<uint8>& message,
- const std::string& default_url) = 0;
-
- virtual void NeedKey(const std::string& session_id,
- const std::string& type,
+ virtual void NeedKey(const std::string& type,
const std::vector<uint8>& init_data,
AesDecryptor* decryptor) = 0;
};
FakeEncryptedMedia(AppBase* app)
- : decryptor_(base::Bind(&FakeEncryptedMedia::KeyAdded,
+ : decryptor_(base::Bind(&FakeEncryptedMedia::OnSessionCreated,
+ base::Unretained(this)),
+ base::Bind(&FakeEncryptedMedia::OnSessionMessage,
base::Unretained(this)),
- base::Bind(&FakeEncryptedMedia::KeyError,
+ base::Bind(&FakeEncryptedMedia::OnSessionReady,
base::Unretained(this)),
- base::Bind(&FakeEncryptedMedia::KeyMessage,
+ base::Bind(&FakeEncryptedMedia::OnSessionClosed,
+ base::Unretained(this)),
+ base::Bind(&FakeEncryptedMedia::OnSessionError,
base::Unretained(this))),
- app_(app) {
- }
+ app_(app) {}
AesDecryptor* decryptor() {
return &decryptor_;
}
- // Callbacks for firing key events. Delegate to |app_|.
- void KeyAdded(const std::string& session_id) {
- app_->KeyAdded(session_id);
+ // Callbacks for firing session events. Delegate to |app_|.
+ void OnSessionCreated(uint32 session_id, const std::string& web_session_id) {
+ app_->OnSessionCreated(session_id, web_session_id);
}
- void KeyError(const std::string& session_id,
- MediaKeys::KeyError error_code,
- int system_code) {
- app_->KeyError(session_id, error_code, system_code);
+ void OnSessionMessage(uint32 session_id,
+ const std::vector<uint8>& message,
+ const std::string& destination_url) {
+ app_->OnSessionMessage(session_id, message, destination_url);
}
- void KeyMessage(const std::string& session_id,
- const std::vector<uint8>& message,
- const std::string& default_url) {
- app_->KeyMessage(session_id, message, default_url);
+ void OnSessionReady(uint32 session_id) {
+ app_->OnSessionReady(session_id);
}
- void NeedKey(const std::string& session_id,
- const std::string& type,
+ void OnSessionClosed(uint32 session_id) {
+ app_->OnSessionClosed(session_id);
+ }
+
+ void OnSessionError(uint32 session_id,
+ MediaKeys::KeyError error_code,
+ int system_code) {
+ app_->OnSessionError(session_id, error_code, system_code);
+ }
+
+ void NeedKey(const std::string& type,
const std::vector<uint8>& init_data) {
- app_->NeedKey(session_id, type, init_data, &decryptor_);
+ app_->NeedKey(type, init_data, &decryptor_);
}
private:
@@ -140,31 +157,40 @@ class FakeEncryptedMedia {
// Provides |kSecretKey| in response to needkey.
class KeyProvidingApp : public FakeEncryptedMedia::AppBase {
public:
- virtual void KeyAdded(const std::string& session_id) OVERRIDE {
- EXPECT_FALSE(session_id.empty());
+ KeyProvidingApp() : current_session_id_(0) {}
+
+ virtual void OnSessionCreated(uint32 session_id,
+ const std::string& web_session_id) OVERRIDE {
+ EXPECT_GT(session_id, 0u);
+ EXPECT_FALSE(web_session_id.empty());
}
- virtual void KeyMessage(const std::string& session_id,
- const std::vector<uint8>& message,
- const std::string& default_url) OVERRIDE {
- EXPECT_FALSE(session_id.empty());
+ virtual void OnSessionMessage(uint32 session_id,
+ const std::vector<uint8>& message,
+ const std::string& default_url) OVERRIDE {
+ EXPECT_GT(session_id, 0u);
EXPECT_FALSE(message.empty());
current_session_id_ = session_id;
}
- virtual void NeedKey(const std::string& session_id,
- const std::string& type,
+ virtual void OnSessionReady(uint32 session_id) OVERRIDE {
+ EXPECT_GT(session_id, 0u);
+ }
+
+ virtual void OnSessionClosed(uint32 session_id) OVERRIDE {
+ EXPECT_GT(session_id, 0u);
+ }
+
+ virtual void NeedKey(const std::string& type,
const std::vector<uint8>& init_data,
AesDecryptor* decryptor) OVERRIDE {
- current_session_id_ = session_id;
-
- if (current_session_id_.empty()) {
- EXPECT_TRUE(decryptor->GenerateKeyRequest(type, kInitData,
- arraysize(kInitData)));
+ if (current_session_id_ == 0u) {
+ EXPECT_TRUE(
+ decryptor->CreateSession(12, type, kInitData, arraysize(kInitData)));
}
- EXPECT_FALSE(current_session_id_.empty());
+ EXPECT_EQ(current_session_id_, 12u);
// Clear Key really needs the key ID in |init_data|. For WebM, they are the
// same, but this is not the case for ISO CENC. Therefore, provide the
@@ -176,31 +202,45 @@ class KeyProvidingApp : public FakeEncryptedMedia::AppBase {
key_id_length = arraysize(kKeyId);
}
- decryptor->AddKey(kSecretKey, arraysize(kSecretKey),
- key_id, key_id_length, current_session_id_);
+ // Convert key into a JSON structure and then add it.
+ std::string jwk = GenerateJWKSet(
+ kSecretKey, arraysize(kSecretKey), key_id, key_id_length);
+ decryptor->UpdateSession(current_session_id_,
+ reinterpret_cast<const uint8*>(jwk.data()),
+ jwk.size());
}
- std::string current_session_id_;
+ uint32 current_session_id_;
};
// Ignores needkey and does not perform a license request
class NoResponseApp : public FakeEncryptedMedia::AppBase {
public:
- virtual void KeyAdded(const std::string& session_id) OVERRIDE {
- EXPECT_FALSE(session_id.empty());
- FAIL() << "Unexpected KeyAdded";
+ virtual void OnSessionCreated(uint32 session_id,
+ const std::string& web_session_id) OVERRIDE {
+ EXPECT_GT(session_id, 0u);
+ EXPECT_FALSE(web_session_id.empty());
}
- virtual void KeyMessage(const std::string& session_id,
- const std::vector<uint8>& message,
- const std::string& default_url) OVERRIDE {
- EXPECT_FALSE(session_id.empty());
+ virtual void OnSessionMessage(uint32 session_id,
+ const std::vector<uint8>& message,
+ const std::string& default_url) OVERRIDE {
+ EXPECT_GT(session_id, 0u);
EXPECT_FALSE(message.empty());
FAIL() << "Unexpected KeyMessage";
}
- virtual void NeedKey(const std::string& session_id,
- const std::string& type,
+ virtual void OnSessionReady(uint32 session_id) OVERRIDE {
+ EXPECT_GT(session_id, 0u);
+ FAIL() << "Unexpected Ready";
+ }
+
+ virtual void OnSessionClosed(uint32 session_id) OVERRIDE {
+ EXPECT_GT(session_id, 0u);
+ FAIL() << "Unexpected Closed";
+ }
+
+ virtual void NeedKey(const std::string& type,
const std::vector<uint8>& init_data,
AesDecryptor* decryptor) OVERRIDE {
}
@@ -221,8 +261,6 @@ class MockMediaSource {
base::Unretained(this)),
base::Bind(&MockMediaSource::DemuxerNeedKey,
base::Unretained(this)),
- base::Bind(&MockMediaSource::OnTextTrack,
- base::Unretained(this)),
LogCB())),
owned_chunk_demuxer_(chunk_demuxer_) {
@@ -239,7 +277,7 @@ class MockMediaSource {
scoped_ptr<Demuxer> GetDemuxer() { return owned_chunk_demuxer_.Pass(); }
- void set_need_key_cb(const NeedKeyCB& need_key_cb) {
+ void set_need_key_cb(const Demuxer::NeedKeyCB& need_key_cb) {
need_key_cb_ = need_key_cb;
}
@@ -321,13 +359,7 @@ class MockMediaSource {
const std::vector<uint8>& init_data) {
DCHECK(!init_data.empty());
CHECK(!need_key_cb_.is_null());
- need_key_cb_.Run(std::string(), type, init_data);
- }
-
- scoped_ptr<TextTrack> OnTextTrack(TextKind kind,
- const std::string& label,
- const std::string& language) {
- return scoped_ptr<TextTrack>();
+ need_key_cb_.Run(type, init_data);
}
private:
@@ -338,7 +370,7 @@ class MockMediaSource {
std::string mimetype_;
ChunkDemuxer* chunk_demuxer_;
scoped_ptr<Demuxer> owned_chunk_demuxer_;
- NeedKeyCB need_key_cb_;
+ Demuxer::NeedKeyCB need_key_cb_;
};
class PipelineIntegrationTest
@@ -362,6 +394,11 @@ class PipelineIntegrationTest
message_loop_.Run();
}
+ void StartHashedPipelineWithMediaSource(MockMediaSource* source) {
+ hashing_enabled_ = true;
+ StartPipelineWithMediaSource(source);
+ }
+
void StartPipelineWithEncryptedMedia(
MockMediaSource* source,
FakeEncryptedMedia* encrypted_media) {
@@ -437,28 +474,6 @@ TEST_F(PipelineIntegrationTest, BasicPlaybackHashed) {
EXPECT_EQ("-3.59,-2.06,-0.43,2.15,0.77,-0.95,", GetAudioHash());
}
-TEST_F(PipelineIntegrationTest, AudioPlaybackBenchmark) {
- // Audio-only files are all that is allowed for clockless playback.
- // Audio file can be specified on the command line
- // (--benchmark-audio-file=id3_png_test.mp3), so check for it.
- std::string filename(CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
- kBenchmarkAudioFile));
- if (filename.empty())
- filename = "sfx_f32le.wav";
-
- ASSERT_TRUE(Start(GetTestDataFilePath(filename), PIPELINE_OK, kClockless));
-
- Play();
-
- ASSERT_TRUE(WaitUntilOnEnded());
-
- // Call Stop() to ensure that the rendering is complete.
- Stop();
- printf("Clockless playback of %s took %.2f ms.\n",
- filename.c_str(),
- GetAudioTime().InMillisecondsF());
-}
-
TEST_F(PipelineIntegrationTest, F32PlaybackHashed) {
ASSERT_TRUE(
Start(GetTestDataFilePath("sfx_f32le.wav"), PIPELINE_OK, kHashed));
@@ -551,8 +566,36 @@ TEST_F(PipelineIntegrationTest, BasicPlayback_MediaSource_Opus_WebM) {
Play();
ASSERT_TRUE(WaitUntilOnEnded());
- EXPECT_EQ(kOpusEndTrimmingWebMFileAudioBytes,
- pipeline_->GetStatistics().audio_bytes_decoded);
+ source.Abort();
+ Stop();
+}
+
+// Flaky. http://crbug.com/304776
+TEST_F(PipelineIntegrationTest, DISABLED_MediaSource_Opus_Seeking_WebM) {
+ EXPECT_CALL(*this, OnSetOpaque(false)).Times(AnyNumber());
+ MockMediaSource source("bear-opus-end-trimming.webm", kOpusAudioOnlyWebM,
+ kAppendWholeFile);
+ StartHashedPipelineWithMediaSource(&source);
+
+
+ EXPECT_EQ(1u, pipeline_->GetBufferedTimeRanges().size());
+ EXPECT_EQ(0, pipeline_->GetBufferedTimeRanges().start(0).InMilliseconds());
+ EXPECT_EQ(kOpusEndTrimmingWebMFileDurationMs,
+ pipeline_->GetBufferedTimeRanges().end(0).InMilliseconds());
+
+ base::TimeDelta start_seek_time = base::TimeDelta::FromMilliseconds(1000);
+ base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(2000);
+
+ Play();
+ ASSERT_TRUE(WaitUntilCurrentTimeIsAfter(start_seek_time));
+ source.Seek(seek_time, 0x1D5, 34017);
+ source.EndOfStream();
+ ASSERT_TRUE(Seek(seek_time));
+
+ ASSERT_TRUE(WaitUntilOnEnded());
+
+ EXPECT_EQ("0.76,0.20,-0.82,-0.58,-1.29,-0.29,", GetAudioHash());
+
source.Abort();
Stop();
}
@@ -930,6 +973,25 @@ TEST_F(PipelineIntegrationTest,
source.Abort();
Stop();
}
+
+TEST_F(PipelineIntegrationTest, BasicPlayback_MediaSource_VideoOnly_MP4_AVC3) {
+ MockMediaSource source("bear-1280x720-v_frag-avc3.mp4", kMP4VideoAVC3,
+ kAppendWholeFile);
+ StartPipelineWithMediaSource(&source);
+ source.EndOfStream();
+
+ EXPECT_EQ(1u, pipeline_->GetBufferedTimeRanges().size());
+ EXPECT_EQ(0, pipeline_->GetBufferedTimeRanges().start(0).InMilliseconds());
+ EXPECT_EQ(k1280IsoAVC3FileDurationMs,
+ pipeline_->GetBufferedTimeRanges().end(0).InMilliseconds());
+
+ Play();
+
+ ASSERT_TRUE(WaitUntilOnEnded());
+ source.Abort();
+ Stop();
+}
+
#endif
// TODO(acolwell): Fix flakiness http://crbug.com/117921
@@ -996,8 +1058,12 @@ TEST_F(PipelineIntegrationTest, ChunkDemuxerAbortRead_VideoOnly) {
// Verify that Opus audio in WebM containers can be played back.
TEST_F(PipelineIntegrationTest, BasicPlayback_AudioOnly_Opus_WebM) {
- ASSERT_TRUE(Start(GetTestDataFilePath("bear-opus.webm"),
+ ASSERT_TRUE(Start(GetTestDataFilePath("bear-opus-end-trimming.webm"),
PIPELINE_OK));
+ EXPECT_EQ(1u, pipeline_->GetBufferedTimeRanges().size());
+ EXPECT_EQ(0, pipeline_->GetBufferedTimeRanges().start(0).InMilliseconds());
+ EXPECT_EQ(kOpusEndTrimmingWebMFileDurationMs,
+ pipeline_->GetBufferedTimeRanges().end(0).InMilliseconds());
Play();
ASSERT_TRUE(WaitUntilOnEnded());
}
@@ -1033,8 +1099,19 @@ TEST_F(PipelineIntegrationTest, BasicPlayback_VP8A_WebM) {
EXPECT_EQ(last_video_frame_format_, VideoFrame::YV12A);
}
+// Verify that VP8A video with odd width/height can be played back.
+TEST_F(PipelineIntegrationTest, BasicPlayback_VP8A_Odd_WebM) {
+ EXPECT_CALL(*this, OnSetOpaque(false)).Times(AnyNumber());
+ ASSERT_TRUE(Start(GetTestDataFilePath("bear-vp8a-odd-dimensions.webm"),
+ PIPELINE_OK));
+ Play();
+ ASSERT_TRUE(WaitUntilOnEnded());
+ EXPECT_EQ(last_video_frame_format_, VideoFrame::YV12A);
+}
+
// Verify that VP8 video with inband text track can be played back.
-TEST_F(PipelineIntegrationTest, BasicPlayback_VP8_WebVTT_WebM) {
+TEST_F(PipelineIntegrationTest,
+ BasicPlayback_VP8_WebVTT_WebM) {
ASSERT_TRUE(Start(GetTestDataFilePath("bear-vp8-webvtt.webm"),
PIPELINE_OK));
Play();
diff --git a/chromium/media/filters/pipeline_integration_test_base.cc b/chromium/media/filters/pipeline_integration_test_base.cc
index 3f0910a2b8b..8dce18cc493 100644
--- a/chromium/media/filters/pipeline_integration_test_base.cc
+++ b/chromium/media/filters/pipeline_integration_test_base.cc
@@ -6,6 +6,7 @@
#include "base/bind.h"
#include "base/memory/scoped_vector.h"
+#include "media/base/clock.h"
#include "media/base/media_log.h"
#include "media/filters/audio_renderer_impl.h"
#include "media/filters/chunk_demuxer.h"
@@ -31,7 +32,7 @@ PipelineIntegrationTestBase::PipelineIntegrationTestBase()
new MediaLog())),
ended_(false),
pipeline_status_(PIPELINE_OK),
- last_video_frame_format_(VideoFrame::INVALID) {
+ last_video_frame_format_(VideoFrame::UNKNOWN) {
base::MD5Init(&md5_context_);
EXPECT_CALL(*this, OnSetOpaque(true)).Times(AnyNumber());
}
@@ -68,7 +69,7 @@ void PipelineIntegrationTestBase::DemuxerNeedKeyCB(
const std::vector<uint8>& init_data) {
DCHECK(!init_data.empty());
CHECK(!need_key_cb_.is_null());
- need_key_cb_.Run(std::string(), type, init_data);
+ need_key_cb_.Run(type, init_data);
}
void PipelineIntegrationTestBase::OnEnded() {
@@ -122,6 +123,9 @@ bool PipelineIntegrationTestBase::Start(const base::FilePath& file_path,
kTestType test_type) {
hashing_enabled_ = test_type == kHashed;
clockless_playback_ = test_type == kClockless;
+ if (clockless_playback_) {
+ pipeline_->SetClockForTesting(new Clock(&dummy_clock_));
+ }
return Start(file_path, expected_status);
}
@@ -229,30 +233,29 @@ PipelineIntegrationTestBase::CreateFilterCollection(
scoped_ptr<FilterCollection> collection(new FilterCollection());
collection->SetDemuxer(demuxer_.get());
- if (!clockless_playback_) {
- ScopedVector<VideoDecoder> video_decoders;
- video_decoders.push_back(
- new VpxVideoDecoder(message_loop_.message_loop_proxy()));
- video_decoders.push_back(
- new FFmpegVideoDecoder(message_loop_.message_loop_proxy()));
-
- // Disable frame dropping if hashing is enabled.
- scoped_ptr<VideoRenderer> renderer(new VideoRendererBase(
- message_loop_.message_loop_proxy(),
- video_decoders.Pass(),
- base::Bind(&PipelineIntegrationTestBase::SetDecryptor,
- base::Unretained(this),
- decryptor),
- base::Bind(&PipelineIntegrationTestBase::OnVideoRendererPaint,
- base::Unretained(this)),
- base::Bind(&PipelineIntegrationTestBase::OnSetOpaque,
- base::Unretained(this)),
- !hashing_enabled_));
- collection->SetVideoRenderer(renderer.Pass());
+ ScopedVector<VideoDecoder> video_decoders;
+ video_decoders.push_back(
+ new VpxVideoDecoder(message_loop_.message_loop_proxy()));
+ video_decoders.push_back(
+ new FFmpegVideoDecoder(message_loop_.message_loop_proxy()));
+ // Disable frame dropping if hashing is enabled.
+ scoped_ptr<VideoRenderer> renderer(new VideoRendererImpl(
+ message_loop_.message_loop_proxy(),
+ video_decoders.Pass(),
+ base::Bind(&PipelineIntegrationTestBase::SetDecryptor,
+ base::Unretained(this),
+ decryptor),
+ base::Bind(&PipelineIntegrationTestBase::OnVideoRendererPaint,
+ base::Unretained(this)),
+ base::Bind(&PipelineIntegrationTestBase::OnSetOpaque,
+ base::Unretained(this)),
+ false));
+ collection->SetVideoRenderer(renderer.Pass());
+
+ if (!clockless_playback_) {
audio_sink_ = new NullAudioSink(message_loop_.message_loop_proxy());
} else {
- // audio only for clockless_playback_
clockless_audio_sink_ = new ClocklessAudioSink();
}
@@ -270,8 +273,7 @@ PipelineIntegrationTestBase::CreateFilterCollection(
audio_decoders.Pass(),
base::Bind(&PipelineIntegrationTestBase::SetDecryptor,
base::Unretained(this),
- decryptor),
- true);
+ decryptor));
// Disable underflow if hashing is enabled.
if (hashing_enabled_) {
audio_sink_->StartAudioHashForTesting();
@@ -314,4 +316,9 @@ base::TimeDelta PipelineIntegrationTestBase::GetAudioTime() {
return clockless_audio_sink_->render_time();
}
+base::TimeTicks DummyTickClock::NowTicks() {
+ now_ += base::TimeDelta::FromSeconds(60);
+ return now_;
+}
+
} // namespace media
diff --git a/chromium/media/filters/pipeline_integration_test_base.h b/chromium/media/filters/pipeline_integration_test_base.h
index ade9ad6d974..d162d0b3187 100644
--- a/chromium/media/filters/pipeline_integration_test_base.h
+++ b/chromium/media/filters/pipeline_integration_test_base.h
@@ -9,11 +9,12 @@
#include "base/message_loop/message_loop.h"
#include "media/audio/clockless_audio_sink.h"
#include "media/audio/null_audio_sink.h"
+#include "media/base/demuxer.h"
#include "media/base/filter_collection.h"
#include "media/base/media_keys.h"
#include "media/base/pipeline.h"
#include "media/base/video_frame.h"
-#include "media/filters/video_renderer_base.h"
+#include "media/filters/video_renderer_impl.h"
#include "testing/gmock/include/gmock/gmock.h"
namespace base {
@@ -23,7 +24,6 @@ class FilePath;
namespace media {
class Decryptor;
-class Demuxer;
// Empty MD5 hash string. Used to verify empty video tracks.
extern const char kNullVideoHash[];
@@ -31,10 +31,21 @@ extern const char kNullVideoHash[];
// Empty hash string. Used to verify empty audio tracks.
extern const char kNullAudioHash[];
+// Dummy tick clock which advances extremely quickly (1 minute every time
+// NowTicks() is called).
+class DummyTickClock : public base::TickClock {
+ public:
+ DummyTickClock() : now_() {}
+ virtual ~DummyTickClock() {}
+ virtual base::TimeTicks NowTicks() OVERRIDE;
+ private:
+ base::TimeTicks now_;
+};
+
// Integration tests for Pipeline. Real demuxers, real decoders, and
// base renderer implementations are used to verify pipeline functionality. The
// renderers used in these tests rely heavily on the AudioRendererBase &
-// VideoRendererBase implementations which contain a majority of the code used
+// VideoRendererImpl implementations which contain a majority of the code used
// in the real AudioRendererImpl & SkCanvasVideoRenderer implementations used in
// the browser. The renderers in this test don't actually write data to a
// display or audio device. Both of these devices are simulated since they have
@@ -95,8 +106,9 @@ class PipelineIntegrationTestBase {
scoped_refptr<ClocklessAudioSink> clockless_audio_sink_;
bool ended_;
PipelineStatus pipeline_status_;
- NeedKeyCB need_key_cb_;
+ Demuxer::NeedKeyCB need_key_cb_;
VideoFrame::Format last_video_frame_format_;
+ DummyTickClock dummy_clock_;
void OnStatusCallbackChecked(PipelineStatus expected_status,
PipelineStatus status);
@@ -104,7 +116,7 @@ class PipelineIntegrationTestBase {
PipelineStatusCB QuitOnStatusCB(PipelineStatus expected_status);
void DemuxerNeedKeyCB(const std::string& type,
const std::vector<uint8>& init_data);
- void set_need_key_cb(const NeedKeyCB& need_key_cb) {
+ void set_need_key_cb(const Demuxer::NeedKeyCB& need_key_cb) {
need_key_cb_ = need_key_cb;
}
diff --git a/chromium/media/filters/skcanvas_video_renderer.cc b/chromium/media/filters/skcanvas_video_renderer.cc
index f0bf13d4bb8..ec3e92fe754 100644
--- a/chromium/media/filters/skcanvas_video_renderer.cc
+++ b/chromium/media/filters/skcanvas_video_renderer.cc
@@ -13,7 +13,9 @@
namespace media {
static bool IsEitherYV12OrYV16(media::VideoFrame::Format format) {
- return format == media::VideoFrame::YV12 || format == media::VideoFrame::YV16;
+ return format == media::VideoFrame::YV12 ||
+ format == media::VideoFrame::YV16 ||
+ format == media::VideoFrame::YV12J;
}
static bool IsEitherYV12OrYV16OrNative(media::VideoFrame::Format format) {
@@ -87,6 +89,11 @@ static void FastPaint(
y_shift = 1;
}
+ if (video_frame->format() == media::VideoFrame::YV12J) {
+ yuv_type = media::YV12;
+ y_shift = 1;
+ }
+
// Transform the destination rectangle to local coordinates.
const SkMatrix& local_matrix = canvas->getTotalMatrix();
SkRect local_dest_rect;
@@ -217,8 +224,10 @@ static void ConvertVideoFrameToBitmap(
(video_frame->visible_rect().y() >> y_shift)) +
(video_frame->visible_rect().x() >> 1);
}
+
switch (video_frame->format()) {
case media::VideoFrame::YV12:
+ case media::VideoFrame::YV12J:
media::ConvertYUVToRGB32(
video_frame->data(media::VideoFrame::kYPlane) + y_offset,
video_frame->data(media::VideoFrame::kUPlane) + uv_offset,
diff --git a/chromium/media/filters/source_buffer_stream.cc b/chromium/media/filters/source_buffer_stream.cc
index 7c76c84a049..77fb279550a 100644
--- a/chromium/media/filters/source_buffer_stream.cc
+++ b/chromium/media/filters/source_buffer_stream.cc
@@ -12,6 +12,22 @@
#include "base/logging.h"
namespace media {
+
+// Buffers with the same timestamp are only allowed under certain conditions.
+// Video: Allowed when the previous frame and current frame are NOT keyframes.
+// This is the situation for VP8 Alt-Ref frames.
+// Otherwise: Allowed in all situations except where a non-keyframe is followed
+// by a keyframe.
+// Returns true if |prev_is_keyframe| and |current_is_keyframe| indicate a
+// same timestamp situation that is allowed. False is returned otherwise.
+static bool AllowSameTimestamp(
+ bool prev_is_keyframe, bool current_is_keyframe, bool is_video) {
+ if (is_video)
+ return !prev_is_keyframe && !current_is_keyframe;
+
+ return prev_is_keyframe || !current_is_keyframe;
+}
+
// Helper class representing a range of buffered data. All buffers in a
// SourceBufferRange are ordered sequentially in presentation order with no
// gaps.
@@ -28,7 +44,8 @@ class SourceBufferRange {
// empty and the front of |new_buffers| must be a keyframe.
// |media_segment_start_time| refers to the starting timestamp for the media
// segment to which these buffers belong.
- SourceBufferRange(const BufferQueue& new_buffers,
+ SourceBufferRange(bool is_video,
+ const BufferQueue& new_buffers,
base::TimeDelta media_segment_start_time,
const InterbufferDistanceCB& interbuffer_distance_cb);
@@ -88,6 +105,15 @@ class SourceBufferRange {
int DeleteGOPFromFront(BufferQueue* deleted_buffers);
int DeleteGOPFromBack(BufferQueue* deleted_buffers);
+ // Gets the range of GOP to secure at least |bytes_to_free| from
+ // [|start_timestamp|, |end_timestamp|).
+ // Returns the size of the buffers to secure if the buffers of
+ // [|start_timestamp|, |end_removal_timestamp|) is removed.
+ // Will not update |end_removal_timestamp| if the returned size is 0.
+ int GetRemovalGOP(
+ base::TimeDelta start_timestamp, base::TimeDelta end_timestamp,
+ int bytes_to_free, base::TimeDelta* end_removal_timestamp);
+
// Indicates whether the GOP at the beginning or end of the range contains the
// next buffer position.
bool FirstGOPContainsNextBufferPosition() const;
@@ -156,10 +182,8 @@ class SourceBufferRange {
bool EndOverlaps(const SourceBufferRange& range) const;
// Returns true if |timestamp| is the timestamp of the next buffer in
- // sequence after |buffer|, false otherwise.
- bool IsNextInSequence(
- const scoped_refptr<media::StreamParserBuffer>& buffer,
- base::TimeDelta timestamp) const;
+ // sequence after |buffers_.back()|, false otherwise.
+ bool IsNextInSequence(base::TimeDelta timestamp, bool is_keyframe) const;
int size_in_bytes() const { return size_in_bytes_; }
@@ -204,6 +228,9 @@ class SourceBufferRange {
// Returns the approximate duration of a buffer in this range.
base::TimeDelta GetApproximateDuration() const;
+ // True if this object stores video data.
+ bool is_video_;
+
// An ordered list of buffers in this range.
BufferQueue buffers_;
@@ -341,6 +368,27 @@ SourceBufferStream::SourceBufferStream(const VideoDecoderConfig& video_config,
video_configs_.push_back(video_config);
}
+SourceBufferStream::SourceBufferStream(const TextTrackConfig& text_config,
+ const LogCB& log_cb)
+ : log_cb_(log_cb),
+ current_config_index_(0),
+ append_config_index_(0),
+ text_track_config_(text_config),
+ seek_pending_(false),
+ end_of_stream_(false),
+ seek_buffer_timestamp_(kNoTimestamp()),
+ selected_range_(NULL),
+ media_segment_start_time_(kNoTimestamp()),
+ range_for_next_append_(ranges_.end()),
+ new_media_segment_(false),
+ last_appended_buffer_timestamp_(kNoTimestamp()),
+ last_appended_buffer_is_keyframe_(false),
+ last_output_buffer_timestamp_(kNoTimestamp()),
+ max_interbuffer_distance_(kNoTimestamp()),
+ memory_limit_(kDefaultAudioMemoryLimit),
+ config_change_pending_(false) {
+}
+
SourceBufferStream::~SourceBufferStream() {
while (!ranges_.empty()) {
delete ranges_.front();
@@ -364,14 +412,14 @@ void SourceBufferStream::OnNewMediaSegment(
media_segment_start_time)) {
last_appended_buffer_timestamp_ = kNoTimestamp();
last_appended_buffer_is_keyframe_ = false;
- } else {
+ } else if (last_range != ranges_.end()) {
DCHECK(last_range == range_for_next_append_);
}
}
bool SourceBufferStream::Append(
const SourceBufferStream::BufferQueue& buffers) {
- TRACE_EVENT2("mse", "SourceBufferStream::Append",
+ TRACE_EVENT2("media", "SourceBufferStream::Append",
"stream type", GetStreamTypeName(),
"buffers to append", buffers.size());
@@ -396,6 +444,14 @@ bool SourceBufferStream::Append(
return false;
}
+ if (!IsNextTimestampValid(buffers.front()->GetDecodeTimestamp(),
+ buffers.front()->IsKeyframe())) {
+ MEDIA_LOG(log_cb_) << "Invalid same timestamp construct detected at time "
+ << buffers.front()->GetDecodeTimestamp().InSecondsF();
+
+ return false;
+ }
+
UpdateMaxInterbufferDistance(buffers);
SetConfigIds(buffers);
@@ -403,32 +459,60 @@ bool SourceBufferStream::Append(
base::TimeDelta next_buffer_timestamp = GetNextBufferTimestamp();
BufferQueue deleted_buffers;
- RangeList::iterator range_for_new_buffers = range_for_next_append_;
+ PrepareRangesForNextAppend(buffers, &deleted_buffers);
+
// If there's a range for |buffers|, insert |buffers| accordingly. Otherwise,
// create a new range with |buffers|.
- if (range_for_new_buffers != ranges_.end()) {
- if (!InsertIntoExistingRange(range_for_new_buffers, buffers,
- &deleted_buffers)) {
- return false;
- }
+ if (range_for_next_append_ != ranges_.end()) {
+ (*range_for_next_append_)->AppendBuffersToEnd(buffers);
+ last_appended_buffer_timestamp_ = buffers.back()->GetDecodeTimestamp();
+ last_appended_buffer_is_keyframe_ = buffers.back()->IsKeyframe();
} else {
- DCHECK(new_media_segment_);
- range_for_new_buffers =
+ base::TimeDelta new_range_start_time = media_segment_start_time_;
+ const BufferQueue* buffers_for_new_range = &buffers;
+ BufferQueue trimmed_buffers;
+
+ // If the new range is not being created because of a new media
+ // segment, then we must make sure that we start with a keyframe.
+ // This can happen if the GOP in the previous append gets destroyed
+ // by a Remove() call.
+ if (!new_media_segment_ && !buffers.front()->IsKeyframe()) {
+ BufferQueue::const_iterator itr = buffers.begin();
+
+ // Scan past all the non-keyframes.
+ while (itr != buffers.end() && !(*itr)->IsKeyframe()) {
+ ++itr;
+ }
+
+ // If we didn't find a keyframe, then update the last appended
+ // buffer state and return.
+ if (itr == buffers.end()) {
+ last_appended_buffer_timestamp_ = buffers.back()->GetDecodeTimestamp();
+ last_appended_buffer_is_keyframe_ = buffers.back()->IsKeyframe();
+ return true;
+ }
+
+ // Copy the first keyframe and everything after it into |trimmed_buffers|.
+ trimmed_buffers.assign(itr, buffers.end());
+
+ new_range_start_time = trimmed_buffers.front()->GetDecodeTimestamp();
+ buffers_for_new_range = &trimmed_buffers;
+ }
+
+ range_for_next_append_ =
AddToRanges(new SourceBufferRange(
- buffers, media_segment_start_time_,
+ is_video(), *buffers_for_new_range, new_range_start_time,
base::Bind(&SourceBufferStream::GetMaxInterbufferDistance,
base::Unretained(this))));
+ last_appended_buffer_timestamp_ =
+ buffers_for_new_range->back()->GetDecodeTimestamp();
+ last_appended_buffer_is_keyframe_ =
+ buffers_for_new_range->back()->IsKeyframe();
}
- range_for_next_append_ = range_for_new_buffers;
new_media_segment_ = false;
- last_appended_buffer_timestamp_ = buffers.back()->GetDecodeTimestamp();
- last_appended_buffer_is_keyframe_ = buffers.back()->IsKeyframe();
- // Resolve overlaps.
- ResolveCompleteOverlaps(range_for_new_buffers, &deleted_buffers);
- ResolveEndOverlap(range_for_new_buffers, &deleted_buffers);
- MergeWithAdjacentRangeIfNecessary(range_for_new_buffers);
+ MergeWithAdjacentRangeIfNecessary(range_for_next_append_);
// Seek to try to fulfill a previous call to Seek().
if (seek_pending_) {
@@ -487,16 +571,34 @@ void SourceBufferStream::Remove(base::TimeDelta start, base::TimeDelta end,
remove_end_timestamp = end;
}
+ BufferQueue deleted_buffers;
+ RemoveInternal(start, remove_end_timestamp, false, &deleted_buffers);
+
+ if (!deleted_buffers.empty())
+ SetSelectedRangeIfNeeded(deleted_buffers.front()->GetDecodeTimestamp());
+}
+
+void SourceBufferStream::RemoveInternal(
+ base::TimeDelta start, base::TimeDelta end, bool is_exclusive,
+ BufferQueue* deleted_buffers) {
+ DVLOG(1) << __FUNCTION__ << "(" << start.InSecondsF()
+ << ", " << end.InSecondsF()
+ << ", " << is_exclusive << ")";
+
+ DCHECK(start >= base::TimeDelta());
+ DCHECK(start < end) << "start " << start.InSecondsF()
+ << " end " << end.InSecondsF();
+ DCHECK(deleted_buffers);
+
RangeList::iterator itr = ranges_.begin();
while (itr != ranges_.end()) {
SourceBufferRange* range = *itr;
- if (range->GetStartTimestamp() >= remove_end_timestamp)
+ if (range->GetStartTimestamp() >= end)
break;
// Split off any remaining end piece and add it to |ranges_|.
- SourceBufferRange* new_range =
- range->SplitRange(remove_end_timestamp, false);
+ SourceBufferRange* new_range = range->SplitRange(end, is_exclusive);
if (new_range) {
itr = ranges_.insert(++itr, new_range);
--itr;
@@ -508,27 +610,50 @@ void SourceBufferStream::Remove(base::TimeDelta start, base::TimeDelta end,
}
// If the current range now is completely covered by the removal
- // range then delete it and move on.
- if (start <= range->GetStartTimestamp()) {
- if (selected_range_ == range)
- SetSelectedRange(NULL);
-
- delete range;
- itr = ranges_.erase(itr);
- continue;
- }
+ // range then we want to delete it.
+ bool delete_range = start < range->GetStartTimestamp() ||
+ (!is_exclusive && start == range->GetStartTimestamp());
// Truncate the current range so that it only contains data before
// the removal range.
BufferQueue saved_buffers;
- range->TruncateAt(start, &saved_buffers, false);
+ range->TruncateAt(start, &saved_buffers, is_exclusive);
// Check to see if the current playback position was removed and
// update the selected range appropriately.
if (!saved_buffers.empty()) {
DCHECK(!range->HasNextBufferPosition());
+ DCHECK(deleted_buffers->empty());
+
+ *deleted_buffers = saved_buffers;
+ }
+
+ if (range == selected_range_ && !range->HasNextBufferPosition())
SetSelectedRange(NULL);
- SetSelectedRangeIfNeeded(saved_buffers.front()->GetDecodeTimestamp());
+
+ // If the current range now is completely covered by the removal
+ // range then delete it and move on.
+ if (delete_range) {
+ DeleteAndRemoveRange(&itr);
+ continue;
+ }
+
+ // Clear |range_for_next_append_| if we determine that the removal
+ // operation makes it impossible for the next append to be added
+ // to the current range.
+ if (range_for_next_append_ != ranges_.end() &&
+ *range_for_next_append_ == range &&
+ last_appended_buffer_timestamp_ != kNoTimestamp()) {
+ base::TimeDelta potential_next_append_timestamp =
+ last_appended_buffer_timestamp_ +
+ base::TimeDelta::FromInternalValue(1);
+
+ if (!range->BelongsToRange(potential_next_append_timestamp)) {
+ DVLOG(1) << "Resetting range_for_next_append_ since the next append"
+ << " can't add to the current range.";
+ range_for_next_append_ =
+ FindExistingRangeFor(potential_next_append_timestamp);
+ }
}
// Move on to the next range.
@@ -537,6 +662,7 @@ void SourceBufferStream::Remove(base::TimeDelta start, base::TimeDelta end,
DCHECK(IsRangeListSorted(ranges_));
DCHECK(OnlySelectedRangeIsSeeked());
+ DVLOG(1) << __FUNCTION__ << " : done";
}
void SourceBufferStream::ResetSeekState() {
@@ -556,21 +682,6 @@ bool SourceBufferStream::ShouldSeekToStartOfBuffered(
beginning_of_buffered < kSeekToStartFudgeRoom());
}
-// Buffers with the same timestamp are only allowed under certain conditions.
-// Video: Allowed when the previous frame and current frame are NOT keyframes.
-// This is the situation for VP8 Alt-Ref frames.
-// Otherwise: Allowed in all situations except where a non-keyframe is followed
-// by a keyframe.
-// Returns true if |prev_is_keyframe| and |current_is_keyframe| indicate a
-// same timestamp situation that is allowed. False is returned otherwise.
-bool SourceBufferStream::AllowSameTimestamp(
- bool prev_is_keyframe, bool current_is_keyframe) const {
- if (video_configs_.size() > 0)
- return !prev_is_keyframe && !current_is_keyframe;
-
- return prev_is_keyframe || !current_is_keyframe;
-}
-
bool SourceBufferStream::IsMonotonicallyIncreasing(
const BufferQueue& buffers) const {
DCHECK(!buffers.empty());
@@ -589,7 +700,8 @@ bool SourceBufferStream::IsMonotonicallyIncreasing(
}
if (current_timestamp == prev_timestamp &&
- !AllowSameTimestamp(prev_is_keyframe, current_is_keyframe)) {
+ !AllowSameTimestamp(prev_is_keyframe, current_is_keyframe,
+ is_video())) {
MEDIA_LOG(log_cb_) << "Unexpected combination of buffers with the"
<< " same timestamp detected at "
<< current_timestamp.InSecondsF();
@@ -603,6 +715,15 @@ bool SourceBufferStream::IsMonotonicallyIncreasing(
return true;
}
+bool SourceBufferStream::IsNextTimestampValid(
+ base::TimeDelta next_timestamp, bool next_is_keyframe) const {
+ return (last_appended_buffer_timestamp_ != next_timestamp) ||
+ new_media_segment_ ||
+ AllowSameTimestamp(last_appended_buffer_is_keyframe_, next_is_keyframe,
+ is_video());
+}
+
+
bool SourceBufferStream::OnlySelectedRangeIsSeeked() const {
for (RangeList::const_iterator itr = ranges_.begin();
itr != ranges_.end(); ++itr) {
@@ -653,17 +774,76 @@ void SourceBufferStream::GarbageCollectIfNeeded() {
int bytes_to_free = ranges_size - memory_limit_;
+ // Begin deleting after the last appended buffer.
+ int bytes_freed = FreeBuffersAfterLastAppended(bytes_to_free);
+
// Begin deleting from the front.
- int bytes_freed = FreeBuffers(bytes_to_free, false);
+ if (bytes_to_free - bytes_freed > 0)
+ bytes_freed += FreeBuffers(bytes_to_free - bytes_freed, false);
// Begin deleting from the back.
if (bytes_to_free - bytes_freed > 0)
FreeBuffers(bytes_to_free - bytes_freed, true);
}
+int SourceBufferStream::FreeBuffersAfterLastAppended(int total_bytes_to_free) {
+ base::TimeDelta next_buffer_timestamp = GetNextBufferTimestamp();
+ if (last_appended_buffer_timestamp_ == kNoTimestamp() ||
+ next_buffer_timestamp == kNoTimestamp() ||
+ last_appended_buffer_timestamp_ >= next_buffer_timestamp) {
+ return 0;
+ }
+
+ base::TimeDelta remove_range_start = last_appended_buffer_timestamp_;
+ if (last_appended_buffer_is_keyframe_)
+ remove_range_start += GetMaxInterbufferDistance();
+
+ base::TimeDelta remove_range_start_keyframe = FindKeyframeAfterTimestamp(
+ remove_range_start);
+ if (remove_range_start_keyframe != kNoTimestamp())
+ remove_range_start = remove_range_start_keyframe;
+ if (remove_range_start >= next_buffer_timestamp)
+ return 0;
+
+ base::TimeDelta remove_range_end;
+ int bytes_freed = GetRemovalRange(
+ remove_range_start, next_buffer_timestamp, total_bytes_to_free,
+ &remove_range_end);
+ if (bytes_freed > 0)
+ Remove(remove_range_start, remove_range_end, next_buffer_timestamp);
+ return bytes_freed;
+}
+
+int SourceBufferStream::GetRemovalRange(
+ base::TimeDelta start_timestamp, base::TimeDelta end_timestamp,
+ int total_bytes_to_free, base::TimeDelta* removal_end_timestamp) {
+ DCHECK(start_timestamp >= base::TimeDelta()) << start_timestamp.InSecondsF();
+ DCHECK(start_timestamp < end_timestamp)
+ << "start " << start_timestamp.InSecondsF()
+ << ", end " << end_timestamp.InSecondsF();
+
+ int bytes_to_free = total_bytes_to_free;
+ int bytes_freed = 0;
+
+ for (RangeList::iterator itr = ranges_.begin();
+ itr != ranges_.end() && bytes_to_free > 0; ++itr) {
+ SourceBufferRange* range = *itr;
+ if (range->GetStartTimestamp() >= end_timestamp)
+ break;
+ if (range->GetEndTimestamp() < start_timestamp)
+ continue;
+
+ int bytes_removed = range->GetRemovalGOP(
+ start_timestamp, end_timestamp, bytes_to_free, removal_end_timestamp);
+ bytes_to_free -= bytes_removed;
+ bytes_freed += bytes_removed;
+ }
+ return bytes_freed;
+}
+
int SourceBufferStream::FreeBuffers(int total_bytes_to_free,
bool reverse_direction) {
- TRACE_EVENT2("mse", "SourceBufferStream::FreeBuffers",
+ TRACE_EVENT2("media", "SourceBufferStream::FreeBuffers",
"total bytes to free", total_bytes_to_free,
"reverse direction", reverse_direction);
@@ -703,9 +883,9 @@ int SourceBufferStream::FreeBuffers(int total_bytes_to_free,
DCHECK(!new_range_for_append);
// Create a new range containing these buffers.
new_range_for_append = new SourceBufferRange(
- buffers, kNoTimestamp(),
- base::Bind(&SourceBufferStream::GetMaxInterbufferDistance,
- base::Unretained(this)));
+ is_video(), buffers, kNoTimestamp(),
+ base::Bind(&SourceBufferStream::GetMaxInterbufferDistance,
+ base::Unretained(this)));
range_for_next_append_ = ranges_.end();
} else {
bytes_to_free -= bytes_deleted;
@@ -741,23 +921,21 @@ int SourceBufferStream::FreeBuffers(int total_bytes_to_free,
return bytes_freed;
}
-bool SourceBufferStream::InsertIntoExistingRange(
- const RangeList::iterator& range_for_new_buffers_itr,
+void SourceBufferStream::PrepareRangesForNextAppend(
const BufferQueue& new_buffers, BufferQueue* deleted_buffers) {
DCHECK(deleted_buffers);
- SourceBufferRange* range_for_new_buffers = *range_for_new_buffers_itr;
-
bool temporarily_select_range = false;
if (!track_buffer_.empty()) {
base::TimeDelta tb_timestamp = track_buffer_.back()->GetDecodeTimestamp();
base::TimeDelta seek_timestamp = FindKeyframeAfterTimestamp(tb_timestamp);
if (seek_timestamp != kNoTimestamp() &&
seek_timestamp < new_buffers.front()->GetDecodeTimestamp() &&
- range_for_new_buffers->BelongsToRange(seek_timestamp)) {
+ range_for_next_append_ != ranges_.end() &&
+ (*range_for_next_append_)->BelongsToRange(seek_timestamp)) {
DCHECK(tb_timestamp < seek_timestamp);
DCHECK(!selected_range_);
- DCHECK(!range_for_new_buffers->HasNextBufferPosition());
+ DCHECK(!(*range_for_next_append_)->HasNextBufferPosition());
// If there are GOPs between the end of the track buffer and the
// beginning of the new buffers, then temporarily seek the range
@@ -765,7 +943,7 @@ bool SourceBufferStream::InsertIntoExistingRange(
// |deleted_buffers| as if they were part of the current playback
// position.
// TODO(acolwell): Figure out a more elegant way to do this.
- SeekAndSetSelectedRange(range_for_new_buffers, seek_timestamp);
+ SeekAndSetSelectedRange(*range_for_next_append_, seek_timestamp);
temporarily_select_range = true;
}
}
@@ -778,74 +956,35 @@ bool SourceBufferStream::InsertIntoExistingRange(
if (prev_timestamp != kNoTimestamp() && prev_timestamp != next_timestamp) {
// Clean up the old buffers between the last appended buffer and the
// beginning of |new_buffers|.
- DeleteBetween(
- range_for_new_buffers_itr, prev_timestamp, next_timestamp, true,
- deleted_buffers);
+ RemoveInternal(prev_timestamp, next_timestamp, true, deleted_buffers);
}
- bool is_exclusive = false;
- if (prev_timestamp == next_timestamp) {
- if (!new_media_segment_ &&
- !AllowSameTimestamp(prev_is_keyframe, next_is_keyframe)) {
- MEDIA_LOG(log_cb_) << "Invalid same timestamp construct detected at time "
- << prev_timestamp.InSecondsF();
- return false;
- }
+ // Make the delete range exclusive if we are dealing with an allowed same
+ // timestamp situation. This prevents the first buffer in the current append
+ // from deleting the last buffer in the previous append if both buffers
+ // have the same timestamp.
+ bool is_exclusive = (prev_timestamp == next_timestamp) &&
+ AllowSameTimestamp(prev_is_keyframe, next_is_keyframe, is_video());
- // Make the delete range exclusive if we are dealing with an allowed same
- // timestamp situation so that the buffer with the same timestamp that is
- // already stored in |*range_for_new_buffers_itr| doesn't get deleted.
- is_exclusive = AllowSameTimestamp(prev_is_keyframe, next_is_keyframe);
- }
+ // Delete the buffers that |new_buffers| overlaps.
+ base::TimeDelta start = new_buffers.front()->GetDecodeTimestamp();
+ base::TimeDelta end = new_buffers.back()->GetDecodeTimestamp();
+ base::TimeDelta duration = new_buffers.back()->duration();
- // If we cannot append the |new_buffers| to the end of the existing range,
- // this is either a start overlap or a middle overlap. Delete the buffers
- // that |new_buffers| overlaps.
- if (!range_for_new_buffers->CanAppendBuffersToEnd(new_buffers)) {
- DeleteBetween(
- range_for_new_buffers_itr, new_buffers.front()->GetDecodeTimestamp(),
- new_buffers.back()->GetDecodeTimestamp(), is_exclusive,
- deleted_buffers);
+ if (duration != kNoTimestamp() && duration > base::TimeDelta()) {
+ end += duration;
+ } else {
+ // TODO(acolwell): Ensure all buffers actually have proper
+ // duration info so that this hack isn't needed.
+ // http://crbug.com/312836
+ end += base::TimeDelta::FromInternalValue(1);
}
+ RemoveInternal(start, end, is_exclusive, deleted_buffers);
+
// Restore the range seek state if necessary.
if (temporarily_select_range)
SetSelectedRange(NULL);
-
- range_for_new_buffers->AppendBuffersToEnd(new_buffers);
- return true;
-}
-
-void SourceBufferStream::DeleteBetween(
- const RangeList::iterator& range_itr, base::TimeDelta start_timestamp,
- base::TimeDelta end_timestamp, bool is_range_exclusive,
- BufferQueue* deleted_buffers) {
- SourceBufferRange* new_next_range =
- (*range_itr)->SplitRange(end_timestamp, is_range_exclusive);
-
- // Insert the |new_next_range| into |ranges_| after |range|.
- if (new_next_range) {
- RangeList::iterator next_range_itr = range_itr;
- ranges_.insert(++next_range_itr, new_next_range);
- }
-
- BufferQueue saved_buffers;
- (*range_itr)->TruncateAt(start_timestamp, &saved_buffers, is_range_exclusive);
-
- if (selected_range_ != *range_itr)
- return;
-
- DCHECK(deleted_buffers->empty());
- *deleted_buffers = saved_buffers;
-
- // If the next buffer position has transferred to the split range, set the
- // selected range accordingly.
- if (new_next_range && new_next_range->HasNextBufferPosition()) {
- DCHECK(!(*range_itr)->HasNextBufferPosition());
- SetSelectedRange(new_next_range);
- } else if (!selected_range_->HasNextBufferPosition()) {
- SetSelectedRange(NULL);
- }
}
bool SourceBufferStream::AreAdjacentInSequence(
@@ -855,77 +994,6 @@ bool SourceBufferStream::AreAdjacentInSequence(
first_timestamp + ComputeFudgeRoom(GetMaxInterbufferDistance());
}
-void SourceBufferStream::ResolveCompleteOverlaps(
- const RangeList::iterator& range_with_new_buffers_itr,
- BufferQueue* deleted_buffers) {
- DCHECK(deleted_buffers);
-
- SourceBufferRange* range_with_new_buffers = *range_with_new_buffers_itr;
- RangeList::iterator next_range_itr = range_with_new_buffers_itr;
- ++next_range_itr;
-
- while (next_range_itr != ranges_.end() &&
- range_with_new_buffers->CompletelyOverlaps(**next_range_itr)) {
- if (*next_range_itr == selected_range_) {
- DCHECK(deleted_buffers->empty());
- selected_range_->DeleteAll(deleted_buffers);
- SetSelectedRange(NULL);
- }
- delete *next_range_itr;
- next_range_itr = ranges_.erase(next_range_itr);
- }
-}
-
-void SourceBufferStream::ResolveEndOverlap(
- const RangeList::iterator& range_with_new_buffers_itr,
- BufferQueue* deleted_buffers) {
- DCHECK(deleted_buffers);
-
- SourceBufferRange* range_with_new_buffers = *range_with_new_buffers_itr;
- RangeList::iterator next_range_itr = range_with_new_buffers_itr;
- ++next_range_itr;
-
- if (next_range_itr == ranges_.end() ||
- !range_with_new_buffers->EndOverlaps(**next_range_itr)) {
- return;
- }
-
- // Split the overlapped range after |range_with_new_buffers|'s last buffer
- // overlaps. Now |overlapped_range| contains only the buffers that do not
- // belong in |ranges_| anymore, and |new_next_range| contains buffers that
- // go after |range_with_new_buffers| (without overlap).
- scoped_ptr<SourceBufferRange> overlapped_range(*next_range_itr);
- next_range_itr = ranges_.erase(next_range_itr);
-
- SourceBufferRange* new_next_range =
- overlapped_range->SplitRange(
- range_with_new_buffers->GetEndTimestamp(), true);
-
- // If there were non-overlapped buffers, add the new range to |ranges_|.
- if (new_next_range)
- AddToRanges(new_next_range);
-
- // If we didn't overlap a selected range, return.
- if (selected_range_ != overlapped_range)
- return;
-
- // If the |overlapped_range| transfers its next buffer position to
- // |new_next_range|, make |new_next_range| the |selected_range_|.
- if (new_next_range && new_next_range->HasNextBufferPosition()) {
- DCHECK(!overlapped_range->HasNextBufferPosition());
- SetSelectedRange(new_next_range);
- return;
- }
-
- // Save the buffers in |overlapped_range|.
- DCHECK(deleted_buffers->empty());
- DCHECK_EQ(overlapped_range.get(), selected_range_);
- overlapped_range->DeleteAll(deleted_buffers);
-
- // |overlapped_range| will be deleted, so set |selected_range_| to NULL.
- SetSelectedRange(NULL);
-}
-
void SourceBufferStream::PruneTrackBuffer(const base::TimeDelta timestamp) {
// If we don't have the next timestamp, we don't have anything to delete.
if (timestamp == kNoTimestamp())
@@ -939,26 +1007,29 @@ void SourceBufferStream::PruneTrackBuffer(const base::TimeDelta timestamp) {
void SourceBufferStream::MergeWithAdjacentRangeIfNecessary(
const RangeList::iterator& range_with_new_buffers_itr) {
+ DCHECK(range_with_new_buffers_itr != ranges_.end());
+
SourceBufferRange* range_with_new_buffers = *range_with_new_buffers_itr;
RangeList::iterator next_range_itr = range_with_new_buffers_itr;
++next_range_itr;
- if (next_range_itr != ranges_.end() &&
- range_with_new_buffers->CanAppendRangeToEnd(**next_range_itr)) {
- bool transfer_current_position = selected_range_ == *next_range_itr;
- range_with_new_buffers->AppendRangeToEnd(**next_range_itr,
- transfer_current_position);
- // Update |selected_range_| pointer if |range| has become selected after
- // merges.
- if (transfer_current_position)
- SetSelectedRange(range_with_new_buffers);
+ if (next_range_itr == ranges_.end() ||
+ !range_with_new_buffers->CanAppendRangeToEnd(**next_range_itr)) {
+ return;
+ }
+
+ bool transfer_current_position = selected_range_ == *next_range_itr;
+ range_with_new_buffers->AppendRangeToEnd(**next_range_itr,
+ transfer_current_position);
+ // Update |selected_range_| pointer if |range| has become selected after
+ // merges.
+ if (transfer_current_position)
+ SetSelectedRange(range_with_new_buffers);
- if (next_range_itr == range_for_next_append_)
- range_for_next_append_ = range_with_new_buffers_itr;
+ if (next_range_itr == range_for_next_append_)
+ range_for_next_append_ = range_with_new_buffers_itr;
- delete *next_range_itr;
- ranges_.erase(next_range_itr);
- }
+ DeleteAndRemoveRange(&next_range_itr);
}
void SourceBufferStream::Seek(base::TimeDelta timestamp) {
@@ -1013,10 +1084,9 @@ void SourceBufferStream::OnSetDuration(base::TimeDelta duration) {
while (itr != ranges_.end()) {
// If we're about to delete the selected range, also reset the seek state.
DCHECK((*itr)->GetStartTimestamp() >= duration);
- if (*itr== selected_range_)
+ if (*itr == selected_range_)
ResetSeekState();
- delete *itr;
- itr = ranges_.erase(itr);
+ DeleteAndRemoveRange(&itr);
}
}
@@ -1118,6 +1188,7 @@ void SourceBufferStream::SeekAndSetSelectedRange(
}
void SourceBufferStream::SetSelectedRange(SourceBufferRange* range) {
+ DVLOG(1) << __FUNCTION__ << " : " << selected_range_ << " -> " << range;
if (selected_range_)
selected_range_->ResetNextBufferPosition();
DCHECK(!range || range->HasNextBufferPosition());
@@ -1165,6 +1236,10 @@ const VideoDecoderConfig& SourceBufferStream::GetCurrentVideoDecoderConfig() {
return video_configs_[current_config_index_];
}
+const TextTrackConfig& SourceBufferStream::GetCurrentTextTrackConfig() {
+ return text_track_config_;
+}
+
base::TimeDelta SourceBufferStream::GetMaxInterbufferDistance() const {
if (max_interbuffer_distance_ == kNoTimestamp())
return base::TimeDelta::FromMilliseconds(kDefaultBufferDurationInMs);
@@ -1396,10 +1471,30 @@ std::string SourceBufferStream::GetStreamTypeName() const {
return "AUDIO";
}
+void SourceBufferStream::DeleteAndRemoveRange(RangeList::iterator* itr) {
+ DVLOG(1) << __FUNCTION__;
+
+ DCHECK(*itr != ranges_.end());
+ if (**itr == selected_range_) {
+ DVLOG(1) << __FUNCTION__ << " deleting selected range.";
+ SetSelectedRange(NULL);
+ }
+
+ if (*itr == range_for_next_append_) {
+ DVLOG(1) << __FUNCTION__ << " deleting range_for_next_append_.";
+ range_for_next_append_ = ranges_.end();
+ }
+
+ delete **itr;
+ *itr = ranges_.erase(*itr);
+}
+
SourceBufferRange::SourceBufferRange(
- const BufferQueue& new_buffers, base::TimeDelta media_segment_start_time,
+ bool is_video, const BufferQueue& new_buffers,
+ base::TimeDelta media_segment_start_time,
const InterbufferDistanceCB& interbuffer_distance_cb)
- : keyframe_map_index_base_(0),
+ : is_video_(is_video),
+ keyframe_map_index_base_(0),
next_buffer_index_(-1),
media_segment_start_time_(media_segment_start_time),
interbuffer_distance_cb_(interbuffer_distance_cb),
@@ -1411,9 +1506,7 @@ SourceBufferRange::SourceBufferRange(
}
void SourceBufferRange::AppendBuffersToEnd(const BufferQueue& new_buffers) {
- DCHECK(buffers_.empty() ||
- buffers_.back()->GetDecodeTimestamp() <=
- new_buffers.front()->GetDecodeTimestamp());
+ DCHECK(buffers_.empty() || CanAppendBuffersToEnd(new_buffers));
for (BufferQueue::const_iterator itr = new_buffers.begin();
itr != new_buffers.end(); ++itr) {
@@ -1492,7 +1585,7 @@ SourceBufferRange* SourceBufferRange::SplitRange(
// Create a new range with |removed_buffers|.
SourceBufferRange* split_range =
new SourceBufferRange(
- removed_buffers, kNoTimestamp(), interbuffer_distance_cb_);
+ is_video_, removed_buffers, kNoTimestamp(), interbuffer_distance_cb_);
// If the next buffer position is now in |split_range|, update the state of
// this range and |split_range| accordingly.
@@ -1629,6 +1722,47 @@ int SourceBufferRange::DeleteGOPFromBack(BufferQueue* deleted_buffers) {
return total_bytes_deleted;
}
+int SourceBufferRange::GetRemovalGOP(
+ base::TimeDelta start_timestamp, base::TimeDelta end_timestamp,
+ int total_bytes_to_free, base::TimeDelta* removal_end_timestamp) {
+ int bytes_to_free = total_bytes_to_free;
+ int bytes_removed = 0;
+
+ KeyframeMap::iterator gop_itr = GetFirstKeyframeAt(start_timestamp, false);
+ if (gop_itr == keyframe_map_.end())
+ return 0;
+ int keyframe_index = gop_itr->second - keyframe_map_index_base_;
+ BufferQueue::iterator buffer_itr = buffers_.begin() + keyframe_index;
+ KeyframeMap::iterator gop_end = keyframe_map_.end();
+ if (end_timestamp < GetBufferedEndTimestamp())
+ gop_end = GetFirstKeyframeBefore(end_timestamp);
+
+ // Check if the removal range is within a GOP and skip the loop if so.
+ // [keyframe]...[start_timestamp]...[end_timestamp]...[keyframe]
+ KeyframeMap::iterator gop_itr_prev = gop_itr;
+ if (gop_itr_prev != keyframe_map_.begin() && --gop_itr_prev == gop_end)
+ gop_end = gop_itr;
+
+ while (gop_itr != gop_end && bytes_to_free > 0) {
+ ++gop_itr;
+
+ int gop_size = 0;
+ int next_gop_index = gop_itr == keyframe_map_.end() ?
+ buffers_.size() : gop_itr->second - keyframe_map_index_base_;
+ BufferQueue::iterator next_gop_start = buffers_.begin() + next_gop_index;
+ for (; buffer_itr != next_gop_start; ++buffer_itr)
+ gop_size += (*buffer_itr)->data_size();
+
+ bytes_removed += gop_size;
+ bytes_to_free -= gop_size;
+ }
+ if (bytes_removed > 0) {
+ *removal_end_timestamp = gop_itr == keyframe_map_.end() ?
+ GetBufferedEndTimestamp() : gop_itr->first;
+ }
+ return bytes_removed;
+}
+
bool SourceBufferRange::FirstGOPContainsNextBufferPosition() const {
if (!HasNextBufferPosition())
return false;
@@ -1720,7 +1854,6 @@ int SourceBufferRange::GetNextConfigId() const {
return buffers_.at(next_buffer_index_)->GetConfigId();
}
-
base::TimeDelta SourceBufferRange::GetNextTimestamp() const {
DCHECK(!buffers_.empty());
DCHECK(HasNextBufferPosition());
@@ -1759,14 +1892,14 @@ bool SourceBufferRange::CanAppendRangeToEnd(
bool SourceBufferRange::CanAppendBuffersToEnd(
const BufferQueue& buffers) const {
DCHECK(!buffers_.empty());
- return IsNextInSequence(buffers_.back(),
- buffers.front()->GetDecodeTimestamp());
+ return IsNextInSequence(buffers.front()->GetDecodeTimestamp(),
+ buffers.front()->IsKeyframe());
}
bool SourceBufferRange::BelongsToRange(base::TimeDelta timestamp) const {
DCHECK(!buffers_.empty());
- return (IsNextInSequence(buffers_.back(), timestamp) ||
+ return (IsNextInSequence(timestamp, false) ||
(GetStartTimestamp() <= timestamp && timestamp <= GetEndTimestamp()));
}
@@ -1833,10 +1966,11 @@ base::TimeDelta SourceBufferRange::KeyframeBeforeTimestamp(
}
bool SourceBufferRange::IsNextInSequence(
- const scoped_refptr<media::StreamParserBuffer>& buffer,
- base::TimeDelta timestamp) const {
- return buffer->GetDecodeTimestamp() < timestamp &&
- timestamp <= buffer->GetDecodeTimestamp() + GetFudgeRoom();
+ base::TimeDelta timestamp, bool is_keyframe) const {
+ base::TimeDelta end = buffers_.back()->GetDecodeTimestamp();
+ return (end < timestamp && timestamp <= end + GetFudgeRoom()) ||
+ (timestamp == end && AllowSameTimestamp(
+ buffers_.back()->IsKeyframe(), is_keyframe, is_video_));
}
base::TimeDelta SourceBufferRange::GetFudgeRoom() const {
diff --git a/chromium/media/filters/source_buffer_stream.h b/chromium/media/filters/source_buffer_stream.h
index 6167e519e73..4b00504cfb2 100644
--- a/chromium/media/filters/source_buffer_stream.h
+++ b/chromium/media/filters/source_buffer_stream.h
@@ -22,6 +22,7 @@
#include "media/base/media_log.h"
#include "media/base/ranges.h"
#include "media/base/stream_parser_buffer.h"
+#include "media/base/text_track_config.h"
#include "media/base/video_decoder_config.h"
namespace media {
@@ -49,6 +50,8 @@ class MEDIA_EXPORT SourceBufferStream {
const LogCB& log_cb);
SourceBufferStream(const VideoDecoderConfig& video_config,
const LogCB& log_cb);
+ SourceBufferStream(const TextTrackConfig& text_config,
+ const LogCB& log_cb);
~SourceBufferStream();
@@ -107,6 +110,7 @@ class MEDIA_EXPORT SourceBufferStream {
const AudioDecoderConfig& GetCurrentAudioDecoderConfig();
const VideoDecoderConfig& GetCurrentVideoDecoderConfig();
+ const TextTrackConfig& GetCurrentTextTrackConfig();
// Notifies this object that the audio config has changed and buffers in
// future Append() calls should be associated with this new config.
@@ -126,6 +130,8 @@ class MEDIA_EXPORT SourceBufferStream {
}
private:
+ friend class SourceBufferStreamTest;
+
typedef std::list<SourceBufferRange*> RangeList;
// Frees up space if the SourceBufferStream is taking up too much memory.
@@ -137,29 +143,28 @@ class MEDIA_EXPORT SourceBufferStream {
// is true. Returns the number of bytes freed.
int FreeBuffers(int total_bytes_to_free, bool reverse_direction);
- // Appends |new_buffers| into |range_for_new_buffers_itr|, handling start and
- // end overlaps if necessary.
+ // Attempts to delete approximately |total_bytes_to_free| amount of data from
+ // |ranges_|, starting after the last appended buffer before the current
+ // playback position.
+ int FreeBuffersAfterLastAppended(int total_bytes_to_free);
+
+ // Gets the removal range to secure |byte_to_free| from
+ // [|start_timestamp|, |end_timestamp|).
+ // Returns the size of buffers to secure if future
+ // Remove(|start_timestamp|, |removal_end_timestamp|, duration) is called.
+ // Will not update |removal_end_timestamp| if the returned size is 0.
+ int GetRemovalRange(base::TimeDelta start_timestamp,
+ base::TimeDelta end_timestamp, int byte_to_free,
+ base::TimeDelta* removal_end_timestamp);
+
+ // Prepares |range_for_next_append_| so |new_buffers| can be appended.
+ // This involves removing buffers between the end of the previous append
+ // and any buffers covered by the time range in |new_buffers|.
// |deleted_buffers| is an output parameter containing candidates for
- // |track_buffer_|.
- // Returns true if the buffers were successfully inserted into the existing
- // range.
- // Returns false if the buffers being inserted triggered an error.
- bool InsertIntoExistingRange(
- const RangeList::iterator& range_for_new_buffers_itr,
- const BufferQueue& new_buffers,
- BufferQueue* deleted_buffers);
-
- // Resolve overlapping ranges such that no ranges overlap anymore.
- // |range_with_new_buffers_itr| points to the range that has newly appended
- // buffers.
- // |deleted_buffers| is an output parameter containing candidates for
- // |track_buffer_|.
- void ResolveCompleteOverlaps(
- const RangeList::iterator& range_with_new_buffers_itr,
- BufferQueue* deleted_buffers);
- void ResolveEndOverlap(
- const RangeList::iterator& range_with_new_buffers_itr,
- BufferQueue* deleted_buffers);
+ // |track_buffer_| if this method ends up removing the current playback
+ // position from the range.
+ void PrepareRangesForNextAppend(const BufferQueue& new_buffers,
+ BufferQueue* deleted_buffers);
// Removes buffers, from the |track_buffer_|, that come after |timestamp|.
void PruneTrackBuffer(const base::TimeDelta timestamp);
@@ -169,17 +174,6 @@ class MEDIA_EXPORT SourceBufferStream {
void MergeWithAdjacentRangeIfNecessary(
const RangeList::iterator& range_with_new_buffers_itr);
- // Deletes the buffers between |start_timestamp|, |end_timestamp| from
- // the range that |range_itr| points to. Deletes between [start,end] if
- // |is_range_exclusive| is true, or (start,end) if |is_range_exclusive| is
- // false. Buffers are deleted in GOPs, so this method may delete buffers past
- // |end_timestamp| if the keyframe a buffer depends on was deleted.
- void DeleteBetween(const RangeList::iterator& range_itr,
- base::TimeDelta start_timestamp,
- base::TimeDelta end_timestamp,
- bool is_range_exclusive,
- BufferQueue* deleted_buffers);
-
// Returns true if |second_timestamp| is the timestamp of the next buffer in
// sequence after |first_timestamp|, false otherwise.
bool AreAdjacentInSequence(
@@ -223,16 +217,15 @@ class MEDIA_EXPORT SourceBufferStream {
// in |ranges_|, false otherwise or if |ranges_| is empty.
bool ShouldSeekToStartOfBuffered(base::TimeDelta seek_timestamp) const;
- // Returns true if the |prev_is_keyframe| & |current_is_keyframe| combination
- // on buffers with the same timestamp should be allowed. Returns false if the
- // combination should signal an error.
- bool AllowSameTimestamp(bool prev_is_keyframe,
- bool current_is_keyframe) const;
-
// Returns true if the timestamps of |buffers| are monotonically increasing
// since the previous append to the media segment, false otherwise.
bool IsMonotonicallyIncreasing(const BufferQueue& buffers) const;
+ // Returns true if |next_timestamp| and |next_is_keyframe| are valid for
+ // the first buffer after the previous append.
+ bool IsNextTimestampValid(base::TimeDelta next_timestamp,
+ bool next_is_keyframe) const;
+
// Returns true if |selected_range_| is the only range in |ranges_| that
// HasNextBufferPosition().
bool OnlySelectedRangeIsSeeked() const;
@@ -275,6 +268,26 @@ class MEDIA_EXPORT SourceBufferStream {
// or there is a pending seek beyond any existing ranges.
bool IsEndSelected() const;
+ // Deletes the range pointed to by |*itr| and removes it from |ranges_|.
+ // If |*itr| points to |selected_range_|, then |selected_range_| is set to
+ // NULL. After the range is removed, |*itr| is to the range after the one that
+ // was removed or to |ranges_.end()| if the last range was removed.
+ void DeleteAndRemoveRange(RangeList::iterator* itr);
+
+ // Helper function used by Remove() and PrepareRangesForNextAppend() to
+ // remove buffers and ranges between |start| and |end|.
+ // |is_exclusive| - If set to true, buffers with timestamps that
+ // match |start| are not removed. If set to false, buffers with
+ // timestamps that match |start| will be removed.
+ // |*deleted_buffers| - Filled with buffers for the current playback position
+ // if the removal range included the current playback position. These buffers
+ // can be used as candidates for placing in the |track_buffer_|.
+ void RemoveInternal(
+ base::TimeDelta start, base::TimeDelta end, bool is_exclusive,
+ BufferQueue* deleted_buffers);
+
+ bool is_video() const { return video_configs_.size() > 0; }
+
// Callback used to report error strings that can help the web developer
// figure out what is wrong with the content.
LogCB log_cb_;
@@ -298,6 +311,9 @@ class MEDIA_EXPORT SourceBufferStream {
std::vector<AudioDecoderConfig> audio_configs_;
std::vector<VideoDecoderConfig> video_configs_;
+ // Holds the text config for this stream.
+ TextTrackConfig text_track_config_;
+
// True if more data needs to be appended before the Seek() can complete,
// false if no Seek() has been requested or the Seek() is completed.
bool seek_pending_;
diff --git a/chromium/media/filters/source_buffer_stream_unittest.cc b/chromium/media/filters/source_buffer_stream_unittest.cc
index 8b648861e86..9e7373a16ad 100644
--- a/chromium/media/filters/source_buffer_stream_unittest.cc
+++ b/chromium/media/filters/source_buffer_stream_unittest.cc
@@ -119,6 +119,18 @@ class SourceBufferStreamTest : public testing::Test {
stream_->Remove(start, end, duration);
}
+ int GetRemovalRangeInMs(int start, int end, int bytes_to_free,
+ int* removal_end) {
+ base::TimeDelta removal_end_timestamp =
+ base::TimeDelta::FromMilliseconds(*removal_end);
+ int bytes_removed = stream_->GetRemovalRange(
+ base::TimeDelta::FromMilliseconds(start),
+ base::TimeDelta::FromMilliseconds(end), bytes_to_free,
+ &removal_end_timestamp);
+ *removal_end = removal_end_timestamp.InMilliseconds();
+ return bytes_removed;
+ }
+
void CheckExpectedRanges(const std::string& expected) {
Ranges<base::TimeDelta> r = stream_->GetBufferedTime();
@@ -226,7 +238,7 @@ class SourceBufferStreamTest : public testing::Test {
void CheckNoNextBuffer() {
scoped_refptr<StreamParserBuffer> buffer;
- EXPECT_EQ(stream_->GetNextBuffer(&buffer), SourceBufferStream::kNeedBuffer);
+ EXPECT_EQ(SourceBufferStream::kNeedBuffer, stream_->GetNextBuffer(&buffer));
}
void CheckConfig(const VideoDecoderConfig& config) {
@@ -2136,6 +2148,47 @@ TEST_F(SourceBufferStreamTest, GarbageCollection_DeleteSeveralRanges) {
CheckNoNextBuffer();
}
+TEST_F(SourceBufferStreamTest, GarbageCollection_DeleteAfterLastAppend) {
+ // Set memory limit to 10 buffers.
+ SetMemoryLimit(10);
+
+ // Append 1 GOP starting at 310ms, 30ms apart.
+ NewSegmentAppend("310K 340 370");
+
+ // Append 2 GOPs starting at 490ms, 30ms apart.
+ NewSegmentAppend("490K 520 550 580K 610 640");
+
+ CheckExpectedRangesByTimestamp("{ [310,400) [490,670) }");
+
+ // Seek to the GOP at 580ms.
+ SeekToTimestamp(base::TimeDelta::FromMilliseconds(580));
+
+ // Append 2 GOPs before the existing ranges.
+ // So the ranges before GC are "{ [100,280) [310,400) [490,670) }".
+ NewSegmentAppend("100K 130 160 190K 220 250K");
+
+ // Should save the newly appended GOPs.
+ CheckExpectedRangesByTimestamp("{ [100,280) [580,670) }");
+}
+
+TEST_F(SourceBufferStreamTest, GarbageCollection_DeleteAfterLastAppendMerged) {
+ // Set memory limit to 10 buffers.
+ SetMemoryLimit(10);
+
+ // Append 3 GOPs starting at 400ms, 30ms apart.
+ NewSegmentAppend("400K 430 460 490K 520 550 580K 610 640");
+
+ // Seek to the GOP at 580ms.
+ SeekToTimestamp(base::TimeDelta::FromMilliseconds(580));
+
+ // Append 2 GOPs starting at 220ms, and they will be merged with the existing
+ // range. So the range before GC is "{ [220,670) }".
+ NewSegmentAppend("220K 250 280 310K 340 370");
+
+ // Should save the newly appended GOPs.
+ CheckExpectedRangesByTimestamp("{ [220,400) [580,670) }");
+}
+
TEST_F(SourceBufferStreamTest, GarbageCollection_NoSeek) {
// Set memory limit to 20 buffers.
SetMemoryLimit(20);
@@ -2479,6 +2532,134 @@ TEST_F(SourceBufferStreamTest, GarbageCollection_Performance) {
}
}
+TEST_F(SourceBufferStreamTest, GetRemovalRange_BytesToFree) {
+ // Append 2 GOPs starting at 300ms, 30ms apart.
+ NewSegmentAppend("300K 330 360 390K 420 450");
+
+ // Append 2 GOPs starting at 600ms, 30ms apart.
+ NewSegmentAppend("600K 630 660 690K 720 750");
+
+ // Append 2 GOPs starting at 900ms, 30ms apart.
+ NewSegmentAppend("900K 930 960 990K 1020 1050");
+
+ CheckExpectedRangesByTimestamp("{ [300,480) [600,780) [900,1080) }");
+
+ int remove_range_end = -1;
+ int bytes_removed = -1;
+
+ // Size 0.
+ bytes_removed = GetRemovalRangeInMs(300, 1080, 0, &remove_range_end);
+ EXPECT_EQ(-1, remove_range_end);
+ EXPECT_EQ(0, bytes_removed);
+
+ // Smaller than the size of GOP.
+ bytes_removed = GetRemovalRangeInMs(300, 1080, 1, &remove_range_end);
+ EXPECT_EQ(390, remove_range_end);
+ // Remove as the size of GOP.
+ EXPECT_EQ(3, bytes_removed);
+
+ // The same size with a GOP.
+ bytes_removed = GetRemovalRangeInMs(300, 1080, 3, &remove_range_end);
+ EXPECT_EQ(390, remove_range_end);
+ EXPECT_EQ(3, bytes_removed);
+
+ // The same size with a range.
+ bytes_removed = GetRemovalRangeInMs(300, 1080, 6, &remove_range_end);
+ EXPECT_EQ(480, remove_range_end);
+ EXPECT_EQ(6, bytes_removed);
+
+ // A frame larger than a range.
+ bytes_removed = GetRemovalRangeInMs(300, 1080, 7, &remove_range_end);
+ EXPECT_EQ(690, remove_range_end);
+ EXPECT_EQ(9, bytes_removed);
+
+ // The same size with two ranges.
+ bytes_removed = GetRemovalRangeInMs(300, 1080, 12, &remove_range_end);
+ EXPECT_EQ(780, remove_range_end);
+ EXPECT_EQ(12, bytes_removed);
+
+ // Larger than two ranges.
+ bytes_removed = GetRemovalRangeInMs(300, 1080, 14, &remove_range_end);
+ EXPECT_EQ(990, remove_range_end);
+ EXPECT_EQ(15, bytes_removed);
+
+ // The same size with the whole ranges.
+ bytes_removed = GetRemovalRangeInMs(300, 1080, 18, &remove_range_end);
+ EXPECT_EQ(1080, remove_range_end);
+ EXPECT_EQ(18, bytes_removed);
+
+ // Larger than the whole ranges.
+ bytes_removed = GetRemovalRangeInMs(300, 1080, 20, &remove_range_end);
+ EXPECT_EQ(1080, remove_range_end);
+ EXPECT_EQ(18, bytes_removed);
+}
+
+TEST_F(SourceBufferStreamTest, GetRemovalRange_Range) {
+ // Append 2 GOPs starting at 300ms, 30ms apart.
+ NewSegmentAppend("300K 330 360 390K 420 450");
+
+ // Append 2 GOPs starting at 600ms, 30ms apart.
+ NewSegmentAppend("600K 630 660 690K 720 750");
+
+ // Append 2 GOPs starting at 900ms, 30ms apart.
+ NewSegmentAppend("900K 930 960 990K 1020 1050");
+
+ CheckExpectedRangesByTimestamp("{ [300,480) [600,780) [900,1080) }");
+
+ int remove_range_end = -1;
+ int bytes_removed = -1;
+
+ // Within a GOP and no keyframe.
+ bytes_removed = GetRemovalRangeInMs(630, 660, 20, &remove_range_end);
+ EXPECT_EQ(-1, remove_range_end);
+ EXPECT_EQ(0, bytes_removed);
+
+ // Across a GOP and no keyframe.
+ bytes_removed = GetRemovalRangeInMs(630, 750, 20, &remove_range_end);
+ EXPECT_EQ(-1, remove_range_end);
+ EXPECT_EQ(0, bytes_removed);
+
+ // The same size with a range.
+ bytes_removed = GetRemovalRangeInMs(600, 780, 20, &remove_range_end);
+ EXPECT_EQ(780, remove_range_end);
+ EXPECT_EQ(6, bytes_removed);
+
+ // One frame larger than a range.
+ bytes_removed = GetRemovalRangeInMs(570, 810, 20, &remove_range_end);
+ EXPECT_EQ(780, remove_range_end);
+ EXPECT_EQ(6, bytes_removed);
+
+ // Facing the other ranges.
+ bytes_removed = GetRemovalRangeInMs(480, 900, 20, &remove_range_end);
+ EXPECT_EQ(780, remove_range_end);
+ EXPECT_EQ(6, bytes_removed);
+
+ // In the middle of the other ranges, but not including any GOP.
+ bytes_removed = GetRemovalRangeInMs(420, 960, 20, &remove_range_end);
+ EXPECT_EQ(780, remove_range_end);
+ EXPECT_EQ(6, bytes_removed);
+
+ // In the middle of the other ranges.
+ bytes_removed = GetRemovalRangeInMs(390, 990, 20, &remove_range_end);
+ EXPECT_EQ(990, remove_range_end);
+ EXPECT_EQ(12, bytes_removed);
+
+ // A frame smaller than the whole ranges.
+ bytes_removed = GetRemovalRangeInMs(330, 1050, 20, &remove_range_end);
+ EXPECT_EQ(990, remove_range_end);
+ EXPECT_EQ(12, bytes_removed);
+
+ // The same with the whole ranges.
+ bytes_removed = GetRemovalRangeInMs(300, 1080, 20, &remove_range_end);
+ EXPECT_EQ(1080, remove_range_end);
+ EXPECT_EQ(18, bytes_removed);
+
+ // Larger than the whole ranges.
+ bytes_removed = GetRemovalRangeInMs(270, 1110, 20, &remove_range_end);
+ EXPECT_EQ(1080, remove_range_end);
+ EXPECT_EQ(18, bytes_removed);
+}
+
TEST_F(SourceBufferStreamTest, ConfigChange_Basic) {
VideoDecoderConfig new_config = TestVideoConfig::Large();
ASSERT_FALSE(new_config.Matches(config_));
@@ -3004,6 +3185,96 @@ TEST_F(SourceBufferStreamTest, Remove_BeforeCurrentPosition) {
CheckExpectedBuffers("150 180K 210 240 270K 300 330");
}
+// Test removing the entire range for the current media segment
+// being appended.
+TEST_F(SourceBufferStreamTest, Remove_MidSegment) {
+ Seek(0);
+ NewSegmentAppend("0K 30 60 90 120K 150 180 210");
+ CheckExpectedRangesByTimestamp("{ [0,240) }");
+
+ NewSegmentAppend("0K 30");
+
+ CheckExpectedBuffers("0K");
+
+ CheckExpectedRangesByTimestamp("{ [0,60) [120,240) }");
+
+ // Remove the entire range that is being appended to.
+ RemoveInMs(0, 60, 240);
+
+ // Verify that there is no next buffer since it was removed.
+ CheckNoNextBuffer();
+
+ CheckExpectedRangesByTimestamp("{ [120,240) }");
+
+ // Continue appending frames for the current GOP.
+ AppendBuffers("60 90");
+
+ // Verify that the non-keyframes are not added.
+ CheckExpectedRangesByTimestamp("{ [120,240) }");
+
+ // Finish the previous GOP and start the next one.
+ AppendBuffers("120 150K 180");
+
+ // Verify that new GOP replaces the existing range.
+ CheckExpectedRangesByTimestamp("{ [150,210) }");
+
+
+ SeekToTimestamp(base::TimeDelta::FromMilliseconds(150));
+ CheckExpectedBuffers("150K 180");
+ CheckNoNextBuffer();
+}
+
+// Test removing the current GOP being appended, while not removing
+// the entire range the GOP belongs to.
+TEST_F(SourceBufferStreamTest, Remove_GOPBeingAppended) {
+ Seek(0);
+ NewSegmentAppend("0K 30 60 90 120K 150 180");
+ CheckExpectedRangesByTimestamp("{ [0,210) }");
+
+ // Remove the current GOP being appended.
+ RemoveInMs(120, 150, 240);
+ CheckExpectedRangesByTimestamp("{ [0,120) }");
+
+ // Continue appending the current GOP and the next one.
+ AppendBuffers("210 240K 270 300");
+
+ // Verify that the non-keyframe in the previous GOP does
+ // not affect any existing ranges and a new range is started at the
+ // beginning of the next GOP.
+ CheckExpectedRangesByTimestamp("{ [0,120) [240,330) }");
+
+ // Verify the buffers in the ranges.
+ CheckExpectedBuffers("0K 30 60 90");
+ CheckNoNextBuffer();
+ SeekToTimestamp(base::TimeDelta::FromMilliseconds(240));
+ CheckExpectedBuffers("240K 270 300");
+}
+
+
+TEST_F(SourceBufferStreamTest,
+ Remove_PreviousAppendDestroyedAndOverwriteExistingRange) {
+ SeekToTimestamp(base::TimeDelta::FromMilliseconds(90));
+
+ NewSegmentAppend("90K 120 150");
+ CheckExpectedRangesByTimestamp("{ [90,180) }");
+
+ // Append a segment before the previously appended data.
+ NewSegmentAppend("0K 30 60");
+
+ // Verify that the ranges get merged.
+ CheckExpectedRangesByTimestamp("{ [0,180) }");
+
+ // Remove the data from the last append.
+ RemoveInMs(0, 90, 360);
+ CheckExpectedRangesByTimestamp("{ [90,180) }");
+
+ // Append a new segment that follows the removed segment and
+ // starts at the beginning of the range left over from the
+ // remove.
+ NewSegmentAppend("90K 121 151");
+ CheckExpectedBuffers("90K 121 151");
+}
+
// TODO(vrk): Add unit tests where keyframes are unaligned between streams.
// (crbug.com/133557)
diff --git a/chromium/media/filters/stream_parser_factory.cc b/chromium/media/filters/stream_parser_factory.cc
index c41164b60bb..53ee1b7d190 100644
--- a/chromium/media/filters/stream_parser_factory.cc
+++ b/chromium/media/filters/stream_parser_factory.cc
@@ -13,6 +13,10 @@
#include "media/mp3/mp3_stream_parser.h"
#include "media/webm/webm_stream_parser.h"
+#if defined(OS_ANDROID)
+#include "base/android/build_info.h"
+#endif
+
#if defined(USE_PROPRIETARY_CODECS)
#if defined(ENABLE_MPEG2TS_STREAM_PARSER)
#include "media/mp2t/mp2t_stream_parser.h"
@@ -75,11 +79,7 @@ static const CodecInfo kOpusCodecInfo = { "opus", CodecInfo::AUDIO, NULL,
static const CodecInfo* kVideoWebMCodecs[] = {
&kVP8CodecInfo,
-#if !defined(OS_ANDROID)
- // TODO(wonsik): crbug.com/285016 query Android platform for codec
- // capabilities.
&kVP9CodecInfo,
-#endif
&kVorbisCodecInfo,
&kOpusCodecInfo,
NULL
@@ -130,8 +130,10 @@ bool ValidateMP4ACodecID(const std::string& codec_id, const LogCB& log_cb) {
return false;
}
-static const CodecInfo kH264CodecInfo = { "avc1.*", CodecInfo::VIDEO, NULL,
- CodecInfo::HISTOGRAM_H264 };
+static const CodecInfo kH264AVC1CodecInfo = { "avc1.*", CodecInfo::VIDEO, NULL,
+ CodecInfo::HISTOGRAM_H264 };
+static const CodecInfo kH264AVC3CodecInfo = { "avc3.*", CodecInfo::VIDEO, NULL,
+ CodecInfo::HISTOGRAM_H264 };
static const CodecInfo kMPEG4AACCodecInfo = { "mp4a.40.*", CodecInfo::AUDIO,
&ValidateMP4ACodecID,
CodecInfo::HISTOGRAM_MPEG4AAC };
@@ -145,7 +147,8 @@ static const CodecInfo kEAC3CodecInfo = { "mp4a.a6", CodecInfo::AUDIO, NULL,
#endif
static const CodecInfo* kVideoMP4Codecs[] = {
- &kH264CodecInfo,
+ &kH264AVC1CodecInfo,
+ &kH264AVC3CodecInfo,
&kMPEG4AACCodecInfo,
&kMPEG2AACLCCodecInfo,
NULL
@@ -208,7 +211,8 @@ static StreamParser* BuildMP3Parser(
#if defined(ENABLE_MPEG2TS_STREAM_PARSER)
static const CodecInfo* kVideoMP2TCodecs[] = {
- &kH264CodecInfo,
+ &kH264AVC1CodecInfo,
+ &kH264AVC3CodecInfo,
&kMPEG4AACCodecInfo,
&kMPEG2AACLCCodecInfo,
NULL
@@ -216,7 +220,16 @@ static const CodecInfo* kVideoMP2TCodecs[] = {
static StreamParser* BuildMP2TParser(
const std::vector<std::string>& codecs, const media::LogCB& log_cb) {
- return new media::mp2t::Mp2tStreamParser();
+ bool has_sbr = false;
+ for (size_t i = 0; i < codecs.size(); ++i) {
+ std::string codec_id = codecs[i];
+ if (MatchPattern(codec_id, kMPEG4AACCodecInfo.pattern) &&
+ GetMP4AudioObjectType(codec_id, log_cb) == kAACSBRObjectType) {
+ has_sbr = true;
+ }
+ }
+
+ return new media::mp2t::Mp2tStreamParser(has_sbr);
}
#endif
#endif
@@ -259,13 +272,20 @@ static bool VerifyCodec(
#endif
if (codec_info->tag == CodecInfo::HISTOGRAM_OPUS) {
const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
- if (!cmd_line->HasSwitch(switches::kEnableOpusPlayback))
+ if (cmd_line->HasSwitch(switches::kDisableOpusPlayback))
return false;
}
if (audio_codecs)
audio_codecs->push_back(codec_info->tag);
return true;
case CodecInfo::VIDEO:
+#if defined(OS_ANDROID)
+ // VP9 is only supported on KitKat+ (API Level 19).
+ if (codec_info->tag == CodecInfo::HISTOGRAM_VP9 &&
+ base::android::BuildInfo::GetInstance()->sdk_int() < 19) {
+ return false;
+ }
+#endif
if (video_codecs)
video_codecs->push_back(codec_info->tag);
return true;
diff --git a/chromium/media/filters/video_decoder_selector.cc b/chromium/media/filters/video_decoder_selector.cc
index e961a316497..9e646a77d13 100644
--- a/chromium/media/filters/video_decoder_selector.cc
+++ b/chromium/media/filters/video_decoder_selector.cc
@@ -89,14 +89,14 @@ void VideoDecoderSelector::Abort() {
if (video_decoder_) {
// |decrypted_stream_| is either NULL or already initialized. We don't
- // need to Reset() |decrypted_stream_| in either case.
+ // need to Stop() |decrypted_stream_| in either case.
video_decoder_->Stop(base::Bind(&VideoDecoderSelector::ReturnNullDecoder,
weak_ptr_factory_.GetWeakPtr()));
return;
}
if (decrypted_stream_) {
- decrypted_stream_->Reset(
+ decrypted_stream_->Stop(
base::Bind(&VideoDecoderSelector::ReturnNullDecoder,
weak_ptr_factory_.GetWeakPtr()));
return;
diff --git a/chromium/media/filters/video_frame_stream.cc b/chromium/media/filters/video_frame_stream.cc
index 80e59371492..b18cedafd85 100644
--- a/chromium/media/filters/video_frame_stream.cc
+++ b/chromium/media/filters/video_frame_stream.cc
@@ -6,6 +6,7 @@
#include "base/bind.h"
#include "base/callback_helpers.h"
+#include "base/debug/trace_event.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/message_loop/message_loop_proxy.h"
@@ -44,8 +45,6 @@ void VideoFrameStream::Initialize(DemuxerStream* stream,
DCHECK(init_cb_.is_null());
DCHECK(!init_cb.is_null());
- weak_this_ = weak_factory_.GetWeakPtr();
-
statistics_cb_ = statistics_cb;
init_cb_ = init_cb;
stream_ = stream;
@@ -53,7 +52,9 @@ void VideoFrameStream::Initialize(DemuxerStream* stream,
state_ = STATE_INITIALIZING;
// TODO(xhwang): VideoDecoderSelector only needs a config to select a decoder.
decoder_selector_->SelectVideoDecoder(
- stream, base::Bind(&VideoFrameStream::OnDecoderSelected, weak_this_));
+ stream,
+ base::Bind(&VideoFrameStream::OnDecoderSelected,
+ weak_factory_.GetWeakPtr()));
}
void VideoFrameStream::Read(const ReadCB& read_cb) {
@@ -95,12 +96,14 @@ void VideoFrameStream::Reset(const base::Closure& closure) {
// During decoder reinitialization, VideoDecoder does not need to be and
// cannot be Reset(). |decrypting_demuxer_stream_| was reset before decoder
// reinitialization.
- // During pending demuxer read, VideoDecoder will be reset after demuxer read
- // is returned (in OnBufferReady()).
- if (state_ == STATE_REINITIALIZING_DECODER ||
- state_ == STATE_PENDING_DEMUXER_READ) {
+ if (state_ == STATE_REINITIALIZING_DECODER)
+ return;
+
+ // During pending demuxer read and when not using DecryptingDemuxerStream,
+ // VideoDecoder will be reset after demuxer read is returned
+ // (in OnBufferReady()).
+ if (state_ == STATE_PENDING_DEMUXER_READ && !decrypting_demuxer_stream_)
return;
- }
// VideoDecoder API guarantees that if VideoDecoder::Reset() is called during
// a pending decode, the decode callback must be fired before the reset
@@ -109,7 +112,7 @@ void VideoFrameStream::Reset(const base::Closure& closure) {
// the decoder reset is finished.
if (decrypting_demuxer_stream_) {
decrypting_demuxer_stream_->Reset(base::Bind(
- &VideoFrameStream::ResetDecoder, weak_this_));
+ &VideoFrameStream::ResetDecoder, weak_factory_.GetWeakPtr()));
return;
}
@@ -129,18 +132,21 @@ void VideoFrameStream::Stop(const base::Closure& closure) {
return;
}
- // The stopping process will continue after the pending operation is finished.
- if (state_ == STATE_PENDING_DEMUXER_READ)
- return;
+ DCHECK(init_cb_.is_null());
+
+ // All pending callbacks will be dropped.
+ weak_factory_.InvalidateWeakPtrs();
+
+ // Post callbacks to prevent reentrance into this object.
+ if (!read_cb_.is_null())
+ message_loop_->PostTask(FROM_HERE, base::Bind(
+ base::ResetAndReturn(&read_cb_), ABORTED, scoped_refptr<VideoFrame>()));
+ if (!reset_cb_.is_null())
+ message_loop_->PostTask(FROM_HERE, base::ResetAndReturn(&reset_cb_));
- // VideoDecoder API guarantees that if VideoDecoder::Stop() is called during
- // a pending reset or a pending decode, the callbacks are always fired in the
- // decode -> reset -> stop order. Therefore, we can call VideoDecoder::Stop()
- // regardless of if we have a pending decode or reset and always satisfy the
- // stop callback when the decoder decode/reset is finished.
if (decrypting_demuxer_stream_) {
- decrypting_demuxer_stream_->Reset(base::Bind(
- &VideoFrameStream::StopDecoder, weak_this_));
+ decrypting_demuxer_stream_->Stop(base::Bind(
+ &VideoFrameStream::StopDecoder, weak_factory_.GetWeakPtr()));
return;
}
@@ -204,6 +210,12 @@ void VideoFrameStream::SatisfyRead(Status status,
}
void VideoFrameStream::AbortRead() {
+ // Abort read during pending reset. It is safe to fire the |read_cb_| directly
+ // instead of posting it because VideoRenderBase won't call into this class
+ // again when it's in kFlushing state.
+ // TODO(xhwang): Improve the resetting process to avoid this dependency on the
+ // caller.
+ DCHECK(!reset_cb_.is_null());
SatisfyRead(ABORTED, NULL);
}
@@ -216,8 +228,10 @@ void VideoFrameStream::Decode(const scoped_refptr<DecoderBuffer>& buffer) {
DCHECK(buffer);
int buffer_size = buffer->end_of_stream() ? 0 : buffer->data_size();
+
+ TRACE_EVENT_ASYNC_BEGIN0("media", "VideoFrameStream::Decode", this);
decoder_->Decode(buffer, base::Bind(&VideoFrameStream::OnFrameReady,
- weak_this_, buffer_size));
+ weak_factory_.GetWeakPtr(), buffer_size));
}
void VideoFrameStream::FlushDecoder() {
@@ -230,6 +244,9 @@ void VideoFrameStream::OnFrameReady(int buffer_size,
DVLOG(2) << __FUNCTION__;
DCHECK(state_ == STATE_NORMAL || state_ == STATE_FLUSHING_DECODER) << state_;
DCHECK(!read_cb_.is_null());
+ DCHECK(stop_cb_.is_null());
+
+ TRACE_EVENT_ASYNC_END0("media", "VideoFrameStream::Decode", this);
if (status == VideoDecoder::kDecodeError) {
DCHECK(!frame.get());
@@ -252,17 +269,16 @@ void VideoFrameStream::OnFrameReady(int buffer_size,
statistics_cb_.Run(statistics);
}
- // Drop decoding result if Reset()/Stop() was called during decoding.
- // The stopping/resetting process will be handled when the decoder is
- // stopped/reset.
- if (!stop_cb_.is_null() || !reset_cb_.is_null()) {
+ // Drop decoding result if Reset() was called during decoding.
+ // The resetting process will be handled when the decoder is reset.
+ if (!reset_cb_.is_null()) {
AbortRead();
return;
}
// Decoder flushed. Reinitialize the video decoder.
if (state_ == STATE_FLUSHING_DECODER &&
- status == VideoDecoder::kOk && frame->IsEndOfStream()) {
+ status == VideoDecoder::kOk && frame->end_of_stream()) {
ReinitializeDecoder();
return;
}
@@ -286,7 +302,8 @@ void VideoFrameStream::ReadFromDemuxerStream() {
DCHECK(stop_cb_.is_null());
state_ = STATE_PENDING_DEMUXER_READ;
- stream_->Read(base::Bind(&VideoFrameStream::OnBufferReady, weak_this_));
+ stream_->Read(
+ base::Bind(&VideoFrameStream::OnBufferReady, weak_factory_.GetWeakPtr()));
}
void VideoFrameStream::OnBufferReady(
@@ -297,25 +314,18 @@ void VideoFrameStream::OnBufferReady(
DCHECK_EQ(state_, STATE_PENDING_DEMUXER_READ) << state_;
DCHECK_EQ(buffer.get() != NULL, status == DemuxerStream::kOk) << status;
DCHECK(!read_cb_.is_null());
+ DCHECK(stop_cb_.is_null());
state_ = STATE_NORMAL;
- // Reset()/Stop() was postponed during STATE_PENDING_DEMUXER_READ state.
- // We need to handle them in this function.
-
- if (!stop_cb_.is_null()) {
- AbortRead();
- if (!reset_cb_.is_null())
- Reset(base::ResetAndReturn(&reset_cb_));
- Stop(base::ResetAndReturn(&stop_cb_));
- return;
- }
-
if (status == DemuxerStream::kConfigChanged) {
state_ = STATE_FLUSHING_DECODER;
if (!reset_cb_.is_null()) {
AbortRead();
- Reset(base::ResetAndReturn(&reset_cb_));
+ // If we are using DecryptingDemuxerStream, we already called DDS::Reset()
+ // which will continue the resetting process in it's callback.
+ if (!decrypting_demuxer_stream_)
+ Reset(base::ResetAndReturn(&reset_cb_));
// Reinitialization will continue after Reset() is done.
} else {
FlushDecoder();
@@ -325,7 +335,10 @@ void VideoFrameStream::OnBufferReady(
if (!reset_cb_.is_null()) {
AbortRead();
- Reset(base::ResetAndReturn(&reset_cb_));
+ // If we are using DecryptingDemuxerStream, we already called DDS::Reset()
+ // which will continue the resetting process in it's callback.
+ if (!decrypting_demuxer_stream_)
+ Reset(base::ResetAndReturn(&reset_cb_));
return;
}
@@ -345,40 +358,34 @@ void VideoFrameStream::ReinitializeDecoder() {
DCHECK(stream_->video_decoder_config().IsValidConfig());
state_ = STATE_REINITIALIZING_DECODER;
- decoder_->Initialize(
- stream_->video_decoder_config(),
- base::Bind(&VideoFrameStream::OnDecoderReinitialized, weak_this_));
+ decoder_->Initialize(stream_->video_decoder_config(),
+ base::Bind(&VideoFrameStream::OnDecoderReinitialized,
+ weak_factory_.GetWeakPtr()));
}
void VideoFrameStream::OnDecoderReinitialized(PipelineStatus status) {
DVLOG(2) << __FUNCTION__;
DCHECK(message_loop_->BelongsToCurrentThread());
DCHECK_EQ(state_, STATE_REINITIALIZING_DECODER) << state_;
+ DCHECK(stop_cb_.is_null());
// ReinitializeDecoder() can be called in two cases:
// 1, Flushing decoder finished (see OnFrameReady()).
// 2, Reset() was called during flushing decoder (see OnDecoderReset()).
- // Also, Reset()/Stop() can be called during pending ReinitializeDecoder().
+ // Also, Reset() can be called during pending ReinitializeDecoder().
// This function needs to handle them all!
state_ = (status == PIPELINE_OK) ? STATE_NORMAL : STATE_ERROR;
- if (!read_cb_.is_null() && (!stop_cb_.is_null() || !reset_cb_.is_null()))
- AbortRead();
-
- if (!reset_cb_.is_null())
+ if (!reset_cb_.is_null()) {
+ if (!read_cb_.is_null())
+ AbortRead();
base::ResetAndReturn(&reset_cb_).Run();
-
- // If !stop_cb_.is_null(), it will be handled in OnDecoderStopped().
+ }
if (read_cb_.is_null())
return;
- if (!stop_cb_.is_null()) {
- base::ResetAndReturn(&read_cb_).Run(ABORTED, NULL);
- return;
- }
-
if (state_ == STATE_ERROR) {
SatisfyRead(DECODE_ERROR, NULL);
return;
@@ -394,7 +401,8 @@ void VideoFrameStream::ResetDecoder() {
state_ == STATE_ERROR) << state_;
DCHECK(!reset_cb_.is_null());
- decoder_->Reset(base::Bind(&VideoFrameStream::OnDecoderReset, weak_this_));
+ decoder_->Reset(base::Bind(&VideoFrameStream::OnDecoderReset,
+ weak_factory_.GetWeakPtr()));
}
void VideoFrameStream::OnDecoderReset() {
@@ -406,8 +414,9 @@ void VideoFrameStream::OnDecoderReset() {
// before the reset callback is fired.
DCHECK(read_cb_.is_null());
DCHECK(!reset_cb_.is_null());
+ DCHECK(stop_cb_.is_null());
- if (state_ != STATE_FLUSHING_DECODER || !stop_cb_.is_null()) {
+ if (state_ != STATE_FLUSHING_DECODER) {
base::ResetAndReturn(&reset_cb_).Run();
return;
}
@@ -422,7 +431,8 @@ void VideoFrameStream::StopDecoder() {
DCHECK(state_ != STATE_UNINITIALIZED && state_ != STATE_STOPPED) << state_;
DCHECK(!stop_cb_.is_null());
- decoder_->Stop(base::Bind(&VideoFrameStream::OnDecoderStopped, weak_this_));
+ decoder_->Stop(base::Bind(&VideoFrameStream::OnDecoderStopped,
+ weak_factory_.GetWeakPtr()));
}
void VideoFrameStream::OnDecoderStopped() {
diff --git a/chromium/media/filters/video_frame_stream.h b/chromium/media/filters/video_frame_stream.h
index 7933e62a636..f315677247c 100644
--- a/chromium/media/filters/video_frame_stream.h
+++ b/chromium/media/filters/video_frame_stream.h
@@ -27,7 +27,7 @@ class DecryptingDemuxerStream;
class VideoDecoderSelector;
// Wraps a DemuxerStream and a list of VideoDecoders and provides decoded
-// VideoFrames to its client (e.g. VideoRendererBase).
+// VideoFrames to its client (e.g. VideoRendererImpl).
class MEDIA_EXPORT VideoFrameStream {
public:
// Indicates completion of VideoFrameStream initialization.
@@ -136,7 +136,6 @@ class MEDIA_EXPORT VideoFrameStream {
scoped_refptr<base::MessageLoopProxy> message_loop_;
base::WeakPtrFactory<VideoFrameStream> weak_factory_;
- base::WeakPtr<VideoFrameStream> weak_this_;
State state_;
diff --git a/chromium/media/filters/video_frame_stream_unittest.cc b/chromium/media/filters/video_frame_stream_unittest.cc
index e57510563f9..c7d22acfa9e 100644
--- a/chromium/media/filters/video_frame_stream_unittest.cc
+++ b/chromium/media/filters/video_frame_stream_unittest.cc
@@ -40,7 +40,8 @@ class VideoFrameStreamTest : public testing::TestWithParam<bool> {
pending_read_(false),
pending_reset_(false),
pending_stop_(false),
- total_bytes_decoded_(0) {
+ total_bytes_decoded_(0),
+ has_no_key_(false) {
ScopedVector<VideoDecoder> decoders;
decoders.push_back(decoder_);
@@ -101,6 +102,12 @@ class VideoFrameStreamTest : public testing::TestWithParam<bool> {
void Decrypt(Decryptor::StreamType stream_type,
const scoped_refptr<DecoderBuffer>& encrypted,
const Decryptor::DecryptCB& decrypt_cb) {
+ DCHECK(encrypted->decrypt_config());
+ if (has_no_key_) {
+ decrypt_cb.Run(Decryptor::kNoKey, NULL);
+ return;
+ }
+
DCHECK_EQ(stream_type, Decryptor::kVideo);
scoped_refptr<DecoderBuffer> decrypted = DecoderBuffer::CopyFrom(
encrypted->data(), encrypted->data_size());
@@ -116,9 +123,9 @@ class VideoFrameStreamTest : public testing::TestWithParam<bool> {
// TODO(xhwang): Add test cases where the fake decoder returns error or
// the fake demuxer aborts demuxer read.
ASSERT_TRUE(status == VideoFrameStream::OK ||
- status == VideoFrameStream::ABORTED);
+ status == VideoFrameStream::ABORTED) << status;
frame_read_ = frame;
- if (frame.get() && !frame->IsEndOfStream())
+ if (frame.get() && !frame->end_of_stream())
num_decoded_frames_++;
pending_read_ = false;
}
@@ -138,13 +145,17 @@ class VideoFrameStreamTest : public testing::TestWithParam<bool> {
decoder_ = NULL;
}
+ void ReadOneFrame() {
+ frame_read_ = NULL;
+ pending_read_ = true;
+ video_frame_stream_->Read(base::Bind(
+ &VideoFrameStreamTest::FrameReady, base::Unretained(this)));
+ message_loop_.RunUntilIdle();
+ }
+
void ReadUntilPending() {
do {
- frame_read_ = NULL;
- pending_read_ = true;
- video_frame_stream_->Read(base::Bind(
- &VideoFrameStreamTest::FrameReady, base::Unretained(this)));
- message_loop_.RunUntilIdle();
+ ReadOneFrame();
} while (!pending_read_);
}
@@ -153,6 +164,7 @@ class VideoFrameStreamTest : public testing::TestWithParam<bool> {
DEMUXER_READ_NORMAL,
DEMUXER_READ_CONFIG_CHANGE,
SET_DECRYPTOR,
+ DECRYPTOR_NO_KEY,
DECODER_INIT,
DECODER_REINIT,
DECODER_READ,
@@ -181,6 +193,13 @@ class VideoFrameStreamTest : public testing::TestWithParam<bool> {
InitializeVideoFrameStream();
break;
+ case DECRYPTOR_NO_KEY:
+ EXPECT_CALL(*this, SetDecryptorReadyCallback(_))
+ .WillRepeatedly(RunCallback<0>(decryptor_.get()));
+ has_no_key_ = true;
+ ReadOneFrame();
+ break;
+
case DECODER_INIT:
EXPECT_CALL(*this, SetDecryptorReadyCallback(_))
.WillRepeatedly(RunCallback<0>(decryptor_.get()));
@@ -230,9 +249,10 @@ class VideoFrameStreamTest : public testing::TestWithParam<bool> {
demuxer_stream_->SatisfyRead();
break;
+ // These two cases are only interesting to test during
+ // VideoFrameStream::Stop(). There's no need to satisfy a callback.
case SET_DECRYPTOR:
- // VideoFrameStream::Stop() does not wait for pending DecryptorReadyCB.
- // Therefore there's no need to satisfy a callback.
+ case DECRYPTOR_NO_KEY:
NOTREACHED();
break;
@@ -245,12 +265,10 @@ class VideoFrameStreamTest : public testing::TestWithParam<bool> {
break;
case DECODER_READ:
- DCHECK(pending_read_);
decoder_->SatisfyRead();
break;
case DECODER_RESET:
- DCHECK(pending_reset_);
decoder_->SatisfyReset();
break;
@@ -305,6 +323,9 @@ class VideoFrameStreamTest : public testing::TestWithParam<bool> {
int total_bytes_decoded_;
scoped_refptr<VideoFrame> frame_read_;
+ // Decryptor has no key to decrypt a frame.
+ bool has_no_key_;
+
private:
DISALLOW_COPY_AND_ASSIGN(VideoFrameStreamTest);
};
@@ -325,7 +346,7 @@ TEST_P(VideoFrameStreamTest, ReadAllFrames) {
Initialize();
do {
Read();
- } while (frame_read_.get() && !frame_read_->IsEndOfStream());
+ } while (frame_read_.get() && !frame_read_->end_of_stream());
const int total_num_frames = kNumConfigs * kNumBuffersInOneConfig;
DCHECK_EQ(num_decoded_frames_, total_num_frames);
@@ -408,6 +429,12 @@ TEST_P(VideoFrameStreamTest, Reset_AfterDemuxerRead_ConfigChange) {
Read();
}
+TEST_P(VideoFrameStreamTest, Reset_DuringNoKeyRead) {
+ Initialize();
+ EnterPendingState(DECRYPTOR_NO_KEY);
+ Reset();
+}
+
TEST_P(VideoFrameStreamTest, Stop_BeforeInitialization) {
pending_stop_ = true;
video_frame_stream_->Stop(
@@ -492,6 +519,12 @@ TEST_P(VideoFrameStreamTest, Stop_AfterConfigChangeRead) {
Stop();
}
+TEST_P(VideoFrameStreamTest, Stop_DuringNoKeyRead) {
+ Initialize();
+ EnterPendingState(DECRYPTOR_NO_KEY);
+ Stop();
+}
+
TEST_P(VideoFrameStreamTest, Stop_DuringReset) {
Initialize();
EnterPendingState(DECODER_RESET);
diff --git a/chromium/media/filters/video_renderer_base.cc b/chromium/media/filters/video_renderer_impl.cc
index 806bb4a2fb8..da07d9a6afb 100644
--- a/chromium/media/filters/video_renderer_base.cc
+++ b/chromium/media/filters/video_renderer_impl.cc
@@ -1,12 +1,13 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/filters/video_renderer_base.h"
+#include "media/filters/video_renderer_impl.h"
#include "base/bind.h"
#include "base/callback.h"
#include "base/callback_helpers.h"
+#include "base/debug/trace_event.h"
#include "base/message_loop/message_loop.h"
#include "base/threading/platform_thread.h"
#include "media/base/buffers.h"
@@ -16,11 +17,11 @@
namespace media {
-base::TimeDelta VideoRendererBase::kMaxLastFrameDuration() {
+base::TimeDelta VideoRendererImpl::kMaxLastFrameDuration() {
return base::TimeDelta::FromMilliseconds(250);
}
-VideoRendererBase::VideoRendererBase(
+VideoRendererImpl::VideoRendererImpl(
const scoped_refptr<base::MessageLoopProxy>& message_loop,
ScopedVector<VideoDecoder> decoders,
const SetDecryptorReadyCB& set_decryptor_ready_cb,
@@ -40,17 +41,19 @@ VideoRendererBase::VideoRendererBase(
playback_rate_(0),
paint_cb_(paint_cb),
set_opaque_cb_(set_opaque_cb),
- last_timestamp_(kNoTimestamp()) {
+ last_timestamp_(kNoTimestamp()),
+ frames_decoded_(0),
+ frames_dropped_(0) {
DCHECK(!paint_cb_.is_null());
}
-VideoRendererBase::~VideoRendererBase() {
+VideoRendererImpl::~VideoRendererImpl() {
base::AutoLock auto_lock(lock_);
CHECK(state_ == kStopped || state_ == kUninitialized) << state_;
CHECK(thread_.is_null());
}
-void VideoRendererBase::Play(const base::Closure& callback) {
+void VideoRendererImpl::Play(const base::Closure& callback) {
DCHECK(message_loop_->BelongsToCurrentThread());
base::AutoLock auto_lock(lock_);
DCHECK_EQ(kPrerolled, state_);
@@ -58,7 +61,7 @@ void VideoRendererBase::Play(const base::Closure& callback) {
callback.Run();
}
-void VideoRendererBase::Pause(const base::Closure& callback) {
+void VideoRendererImpl::Pause(const base::Closure& callback) {
DCHECK(message_loop_->BelongsToCurrentThread());
base::AutoLock auto_lock(lock_);
DCHECK(state_ != kUninitialized || state_ == kError);
@@ -66,7 +69,7 @@ void VideoRendererBase::Pause(const base::Closure& callback) {
callback.Run();
}
-void VideoRendererBase::Flush(const base::Closure& callback) {
+void VideoRendererImpl::Flush(const base::Closure& callback) {
DCHECK(message_loop_->BelongsToCurrentThread());
base::AutoLock auto_lock(lock_);
DCHECK_EQ(state_, kPaused);
@@ -78,10 +81,10 @@ void VideoRendererBase::Flush(const base::Closure& callback) {
ready_frames_.clear();
received_end_of_stream_ = false;
video_frame_stream_.Reset(base::Bind(
- &VideoRendererBase::OnVideoFrameStreamResetDone, weak_this_));
+ &VideoRendererImpl::OnVideoFrameStreamResetDone, weak_this_));
}
-void VideoRendererBase::Stop(const base::Closure& callback) {
+void VideoRendererImpl::Stop(const base::Closure& callback) {
DCHECK(message_loop_->BelongsToCurrentThread());
base::AutoLock auto_lock(lock_);
if (state_ == kUninitialized || state_ == kStopped) {
@@ -115,27 +118,41 @@ void VideoRendererBase::Stop(const base::Closure& callback) {
video_frame_stream_.Stop(callback);
}
-void VideoRendererBase::SetPlaybackRate(float playback_rate) {
+void VideoRendererImpl::SetPlaybackRate(float playback_rate) {
DCHECK(message_loop_->BelongsToCurrentThread());
base::AutoLock auto_lock(lock_);
playback_rate_ = playback_rate;
}
-void VideoRendererBase::Preroll(base::TimeDelta time,
+void VideoRendererImpl::Preroll(base::TimeDelta time,
const PipelineStatusCB& cb) {
DCHECK(message_loop_->BelongsToCurrentThread());
base::AutoLock auto_lock(lock_);
- DCHECK_EQ(state_, kFlushed) << "Must flush prior to prerolling.";
DCHECK(!cb.is_null());
DCHECK(preroll_cb_.is_null());
+ DCHECK(state_ == kFlushed || state_== kPaused) << "state_ " << state_;
+
+ if (state_ == kFlushed) {
+ DCHECK(time != kNoTimestamp());
+ DCHECK(!pending_read_);
+ DCHECK(ready_frames_.empty());
+ } else {
+ DCHECK(time == kNoTimestamp());
+ }
state_ = kPrerolling;
preroll_cb_ = cb;
preroll_timestamp_ = time;
+
+ if (ShouldTransitionToPrerolled_Locked()) {
+ TransitionToPrerolled_Locked();
+ return;
+ }
+
AttemptRead_Locked();
}
-void VideoRendererBase::Initialize(DemuxerStream* stream,
+void VideoRendererImpl::Initialize(DemuxerStream* stream,
const PipelineStatusCB& init_cb,
const StatisticsCB& statistics_cb,
const TimeCB& max_time_cb,
@@ -171,11 +188,11 @@ void VideoRendererBase::Initialize(DemuxerStream* stream,
video_frame_stream_.Initialize(
stream,
statistics_cb,
- base::Bind(&VideoRendererBase::OnVideoFrameStreamInitialized,
+ base::Bind(&VideoRendererImpl::OnVideoFrameStreamInitialized,
weak_this_));
}
-void VideoRendererBase::OnVideoFrameStreamInitialized(bool success,
+void VideoRendererImpl::OnVideoFrameStreamInitialized(bool success,
bool has_alpha) {
DCHECK(message_loop_->BelongsToCurrentThread());
base::AutoLock auto_lock(lock_);
@@ -217,7 +234,7 @@ void VideoRendererBase::OnVideoFrameStreamInitialized(bool success,
}
// PlatformThread::Delegate implementation.
-void VideoRendererBase::ThreadMain() {
+void VideoRendererImpl::ThreadMain() {
base::PlatformThread::SetName("CrVideoRenderer");
// The number of milliseconds to idle when we do not have anything to do.
@@ -238,7 +255,7 @@ void VideoRendererBase::ThreadMain() {
// Remain idle as long as we're not playing.
if (state_ != kPlaying || playback_rate_ == 0) {
- frame_available_.TimedWait(kIdleTimeDelta);
+ UpdateStatsAndWait_Locked(kIdleTimeDelta);
continue;
}
@@ -252,7 +269,7 @@ void VideoRendererBase::ThreadMain() {
continue;
}
- frame_available_.TimedWait(kIdleTimeDelta);
+ UpdateStatsAndWait_Locked(kIdleTimeDelta);
continue;
}
@@ -263,7 +280,7 @@ void VideoRendererBase::ThreadMain() {
// render the next frame.
if (remaining_time.InMicroseconds() > 0) {
remaining_time = std::min(remaining_time, kIdleTimeDelta);
- frame_available_.TimedWait(remaining_time);
+ UpdateStatsAndWait_Locked(remaining_time);
continue;
}
@@ -297,11 +314,12 @@ void VideoRendererBase::ThreadMain() {
}
}
-void VideoRendererBase::PaintNextReadyFrame_Locked() {
+void VideoRendererImpl::PaintNextReadyFrame_Locked() {
lock_.AssertAcquired();
scoped_refptr<VideoFrame> next_frame = ready_frames_.front();
ready_frames_.pop_front();
+ frames_decoded_++;
last_timestamp_ = next_frame->GetTimestamp();
@@ -314,24 +332,24 @@ void VideoRendererBase::PaintNextReadyFrame_Locked() {
paint_cb_.Run(next_frame);
message_loop_->PostTask(FROM_HERE, base::Bind(
- &VideoRendererBase::AttemptRead, weak_this_));
+ &VideoRendererImpl::AttemptRead, weak_this_));
}
-void VideoRendererBase::DropNextReadyFrame_Locked() {
+void VideoRendererImpl::DropNextReadyFrame_Locked() {
+ TRACE_EVENT0("media", "VideoRendererImpl:frameDropped");
+
lock_.AssertAcquired();
last_timestamp_ = ready_frames_.front()->GetTimestamp();
ready_frames_.pop_front();
-
- PipelineStatistics statistics;
- statistics.video_frames_dropped = 1;
- statistics_cb_.Run(statistics);
+ frames_decoded_++;
+ frames_dropped_++;
message_loop_->PostTask(FROM_HERE, base::Bind(
- &VideoRendererBase::AttemptRead, weak_this_));
+ &VideoRendererImpl::AttemptRead, weak_this_));
}
-void VideoRendererBase::FrameReady(VideoFrameStream::Status status,
+void VideoRendererImpl::FrameReady(VideoFrameStream::Status status,
const scoped_refptr<VideoFrame>& frame) {
base::AutoLock auto_lock(lock_);
DCHECK_NE(state_, kUninitialized);
@@ -371,7 +389,7 @@ void VideoRendererBase::FrameReady(VideoFrameStream::Status status,
return;
}
- if (frame->IsEndOfStream()) {
+ if (frame->end_of_stream()) {
DCHECK(!received_end_of_stream_);
received_end_of_stream_ = true;
max_time_cb_.Run(get_duration_cb_.Run());
@@ -384,23 +402,15 @@ void VideoRendererBase::FrameReady(VideoFrameStream::Status status,
// Maintain the latest frame decoded so the correct frame is displayed after
// prerolling has completed.
- if (state_ == kPrerolling && frame->GetTimestamp() <= preroll_timestamp_) {
+ if (state_ == kPrerolling && preroll_timestamp_ != kNoTimestamp() &&
+ frame->GetTimestamp() <= preroll_timestamp_) {
ready_frames_.clear();
}
AddReadyFrame_Locked(frame);
- if (state_ == kPrerolling) {
- if (!video_frame_stream_.CanReadWithoutStalling() ||
- ready_frames_.size() >= static_cast<size_t>(limits::kMaxVideoFrames)) {
- TransitionToPrerolled_Locked();
- }
- } else {
- // We only count frames decoded during normal playback.
- PipelineStatistics statistics;
- statistics.video_frames_decoded = 1;
- statistics_cb_.Run(statistics);
- }
+ if (ShouldTransitionToPrerolled_Locked())
+ TransitionToPrerolled_Locked();
// Always request more decoded video if we have capacity. This serves two
// purposes:
@@ -409,10 +419,16 @@ void VideoRendererBase::FrameReady(VideoFrameStream::Status status,
AttemptRead_Locked();
}
-void VideoRendererBase::AddReadyFrame_Locked(
+bool VideoRendererImpl::ShouldTransitionToPrerolled_Locked() {
+ return state_ == kPrerolling &&
+ (!video_frame_stream_.CanReadWithoutStalling() ||
+ ready_frames_.size() >= static_cast<size_t>(limits::kMaxVideoFrames));
+}
+
+void VideoRendererImpl::AddReadyFrame_Locked(
const scoped_refptr<VideoFrame>& frame) {
lock_.AssertAcquired();
- DCHECK(!frame->IsEndOfStream());
+ DCHECK(!frame->end_of_stream());
// Adjust the incoming frame if its rendering stop time is past the duration
// of the video itself. This is typically the last frame of the video and
@@ -435,12 +451,12 @@ void VideoRendererBase::AddReadyFrame_Locked(
frame_available_.Signal();
}
-void VideoRendererBase::AttemptRead() {
+void VideoRendererImpl::AttemptRead() {
base::AutoLock auto_lock(lock_);
AttemptRead_Locked();
}
-void VideoRendererBase::AttemptRead_Locked() {
+void VideoRendererImpl::AttemptRead_Locked() {
DCHECK(message_loop_->BelongsToCurrentThread());
lock_.AssertAcquired();
@@ -454,7 +470,7 @@ void VideoRendererBase::AttemptRead_Locked() {
case kPrerolling:
case kPlaying:
pending_read_ = true;
- video_frame_stream_.Read(base::Bind(&VideoRendererBase::FrameReady,
+ video_frame_stream_.Read(base::Bind(&VideoRendererImpl::FrameReady,
weak_this_));
return;
@@ -470,7 +486,7 @@ void VideoRendererBase::AttemptRead_Locked() {
}
}
-void VideoRendererBase::OnVideoFrameStreamResetDone() {
+void VideoRendererImpl::OnVideoFrameStreamResetDone() {
base::AutoLock auto_lock(lock_);
if (state_ == kStopped)
return;
@@ -485,7 +501,7 @@ void VideoRendererBase::OnVideoFrameStreamResetDone() {
base::ResetAndReturn(&flush_cb_).Run();
}
-base::TimeDelta VideoRendererBase::CalculateSleepDuration(
+base::TimeDelta VideoRendererImpl::CalculateSleepDuration(
const scoped_refptr<VideoFrame>& next_frame,
float playback_rate) {
// Determine the current and next presentation timestamps.
@@ -498,13 +514,13 @@ base::TimeDelta VideoRendererBase::CalculateSleepDuration(
static_cast<int64>(sleep.InMicroseconds() / playback_rate));
}
-void VideoRendererBase::DoStopOrError_Locked() {
+void VideoRendererImpl::DoStopOrError_Locked() {
lock_.AssertAcquired();
last_timestamp_ = kNoTimestamp();
ready_frames_.clear();
}
-void VideoRendererBase::TransitionToPrerolled_Locked() {
+void VideoRendererImpl::TransitionToPrerolled_Locked() {
lock_.AssertAcquired();
DCHECK_EQ(state_, kPrerolling);
@@ -519,4 +535,23 @@ void VideoRendererBase::TransitionToPrerolled_Locked() {
base::ResetAndReturn(&preroll_cb_).Run(PIPELINE_OK);
}
+void VideoRendererImpl::UpdateStatsAndWait_Locked(
+ base::TimeDelta wait_duration) {
+ lock_.AssertAcquired();
+ DCHECK_GE(frames_decoded_, 0);
+ DCHECK_LE(frames_dropped_, frames_decoded_);
+
+ if (frames_decoded_) {
+ PipelineStatistics statistics;
+ statistics.video_frames_decoded = frames_decoded_;
+ statistics.video_frames_dropped = frames_dropped_;
+ statistics_cb_.Run(statistics);
+
+ frames_decoded_ = 0;
+ frames_dropped_ = 0;
+ }
+
+ frame_available_.TimedWait(wait_duration);
+}
+
} // namespace media
diff --git a/chromium/media/filters/video_renderer_base.h b/chromium/media/filters/video_renderer_impl.h
index 93b2bcd8add..79c5d824ced 100644
--- a/chromium/media/filters/video_renderer_base.h
+++ b/chromium/media/filters/video_renderer_impl.h
@@ -1,9 +1,9 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_FILTERS_VIDEO_RENDERER_BASE_H_
-#define MEDIA_FILTERS_VIDEO_RENDERER_BASE_H_
+#ifndef MEDIA_FILTERS_VIDEO_RENDERER_IMPL_H_
+#define MEDIA_FILTERS_VIDEO_RENDERER_IMPL_H_
#include <deque>
@@ -27,11 +27,11 @@ class MessageLoopProxy;
namespace media {
-// VideoRendererBase creates its own thread for the sole purpose of timing frame
+// VideoRendererImpl creates its own thread for the sole purpose of timing frame
// presentation. It handles reading from the VideoFrameStream and stores the
// results in a queue of decoded frames and executing a callback when a frame is
// ready for rendering.
-class MEDIA_EXPORT VideoRendererBase
+class MEDIA_EXPORT VideoRendererImpl
: public VideoRenderer,
public base::PlatformThread::Delegate {
public:
@@ -54,13 +54,13 @@ class MEDIA_EXPORT VideoRendererBase
// down the video thread may result in losing synchronization with audio.
//
// Setting |drop_frames_| to true causes the renderer to drop expired frames.
- VideoRendererBase(const scoped_refptr<base::MessageLoopProxy>& message_loop,
+ VideoRendererImpl(const scoped_refptr<base::MessageLoopProxy>& message_loop,
ScopedVector<VideoDecoder> decoders,
const SetDecryptorReadyCB& set_decryptor_ready_cb,
const PaintCB& paint_cb,
const SetOpaqueCB& set_opaque_cb,
bool drop_frames);
- virtual ~VideoRendererBase();
+ virtual ~VideoRendererImpl();
// VideoRenderer implementation.
virtual void Initialize(DemuxerStream* stream,
@@ -129,14 +129,23 @@ class MEDIA_EXPORT VideoRendererBase
void TransitionToPrerolled_Locked();
+ // Returns true of all conditions have been met to transition from
+ // kPrerolling to kPrerolled.
+ bool ShouldTransitionToPrerolled_Locked();
+
+ // Runs |statistics_cb_| with |frames_decoded_| and |frames_dropped_|, resets
+ // them to 0, and then waits on |frame_available_| for up to the
+ // |wait_duration|.
+ void UpdateStatsAndWait_Locked(base::TimeDelta wait_duration);
+
scoped_refptr<base::MessageLoopProxy> message_loop_;
- base::WeakPtrFactory<VideoRendererBase> weak_factory_;
- base::WeakPtr<VideoRendererBase> weak_this_;
+ base::WeakPtrFactory<VideoRendererImpl> weak_factory_;
+ base::WeakPtr<VideoRendererImpl> weak_this_;
// Used for accessing data members.
base::Lock lock_;
- // Provides video frames to VideoRendererBase.
+ // Provides video frames to VideoRendererImpl.
VideoFrameStream video_frame_stream_;
// Queue of incoming frames yet to be painted.
@@ -236,9 +245,14 @@ class MEDIA_EXPORT VideoRendererBase
// during flushing.
base::TimeDelta last_timestamp_;
- DISALLOW_COPY_AND_ASSIGN(VideoRendererBase);
+ // Keeps track of the number of frames decoded and dropped since the
+ // last call to |statistics_cb_|. These must be accessed under lock.
+ int frames_decoded_;
+ int frames_dropped_;
+
+ DISALLOW_COPY_AND_ASSIGN(VideoRendererImpl);
};
} // namespace media
-#endif // MEDIA_FILTERS_VIDEO_RENDERER_BASE_H_
+#endif // MEDIA_FILTERS_VIDEO_RENDERER_IMPL_H_
diff --git a/chromium/media/filters/video_renderer_base_unittest.cc b/chromium/media/filters/video_renderer_impl_unittest.cc
index 84d356dae17..0b07a751d37 100644
--- a/chromium/media/filters/video_renderer_base_unittest.cc
+++ b/chromium/media/filters/video_renderer_impl_unittest.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -19,7 +19,7 @@
#include "media/base/mock_filters.h"
#include "media/base/test_helpers.h"
#include "media/base/video_frame.h"
-#include "media/filters/video_renderer_base.h"
+#include "media/filters/video_renderer_impl.h"
#include "testing/gtest/include/gtest/gtest.h"
using ::testing::_;
@@ -36,20 +36,20 @@ namespace media {
static const int kFrameDurationInMs = 10;
static const int kVideoDurationInMs = kFrameDurationInMs * 100;
-class VideoRendererBaseTest : public ::testing::Test {
+class VideoRendererImplTest : public ::testing::Test {
public:
- VideoRendererBaseTest()
+ VideoRendererImplTest()
: decoder_(new MockVideoDecoder()),
demuxer_stream_(DemuxerStream::VIDEO) {
ScopedVector<VideoDecoder> decoders;
decoders.push_back(decoder_);
- renderer_.reset(new VideoRendererBase(
+ renderer_.reset(new VideoRendererImpl(
message_loop_.message_loop_proxy(),
decoders.Pass(),
media::SetDecryptorReadyCB(),
- base::Bind(&VideoRendererBaseTest::OnPaint, base::Unretained(this)),
- base::Bind(&VideoRendererBaseTest::OnSetOpaque, base::Unretained(this)),
+ base::Bind(&VideoRendererImplTest::OnPaint, base::Unretained(this)),
+ base::Bind(&VideoRendererImplTest::OnSetOpaque, base::Unretained(this)),
true));
demuxer_stream_.set_video_decoder_config(TestVideoConfig::Normal());
@@ -59,7 +59,7 @@ class VideoRendererBaseTest : public ::testing::Test {
.WillRepeatedly(RunCallback<0>(DemuxerStream::kOk,
DecoderBuffer::CreateEOSBuffer()));
EXPECT_CALL(*decoder_, Stop(_))
- .WillRepeatedly(Invoke(this, &VideoRendererBaseTest::StopRequested));
+ .WillRepeatedly(Invoke(this, &VideoRendererImplTest::StopRequested));
EXPECT_CALL(statistics_cb_object_, OnStatistics(_))
.Times(AnyNumber());
EXPECT_CALL(*this, OnTimeUpdate(_))
@@ -68,9 +68,9 @@ class VideoRendererBaseTest : public ::testing::Test {
.Times(AnyNumber());
}
- virtual ~VideoRendererBaseTest() {}
+ virtual ~VideoRendererImplTest() {}
- // Callbacks passed into VideoRendererBase().
+ // Callbacks passed into VideoRendererImpl().
MOCK_CONST_METHOD1(OnSetOpaque, void(bool));
// Callbacks passed into Initialize().
@@ -86,10 +86,10 @@ class VideoRendererBaseTest : public ::testing::Test {
// Monitor decodes from the decoder.
EXPECT_CALL(*decoder_, Decode(_, _))
- .WillRepeatedly(Invoke(this, &VideoRendererBaseTest::FrameRequested));
+ .WillRepeatedly(Invoke(this, &VideoRendererImplTest::FrameRequested));
EXPECT_CALL(*decoder_, Reset(_))
- .WillRepeatedly(Invoke(this, &VideoRendererBaseTest::FlushRequested));
+ .WillRepeatedly(Invoke(this, &VideoRendererImplTest::FlushRequested));
InSequence s;
@@ -124,14 +124,14 @@ class VideoRendererBaseTest : public ::testing::Test {
status_cb,
base::Bind(&MockStatisticsCB::OnStatistics,
base::Unretained(&statistics_cb_object_)),
- base::Bind(&VideoRendererBaseTest::OnTimeUpdate,
+ base::Bind(&VideoRendererImplTest::OnTimeUpdate,
base::Unretained(this)),
- base::Bind(&VideoRendererBaseTest::OnNaturalSizeChanged,
+ base::Bind(&VideoRendererImplTest::OnNaturalSizeChanged,
base::Unretained(this)),
ended_event_.GetClosure(),
error_event_.GetPipelineStatusCB(),
- base::Bind(&VideoRendererBaseTest::GetTime, base::Unretained(this)),
- base::Bind(&VideoRendererBaseTest::GetDuration,
+ base::Bind(&VideoRendererImplTest::GetTime, base::Unretained(this)),
+ base::Bind(&VideoRendererImplTest::GetDuration,
base::Unretained(this)));
}
@@ -186,7 +186,7 @@ class VideoRendererBaseTest : public ::testing::Test {
gfx::Size natural_size = TestVideoConfig::NormalCodedSize();
scoped_refptr<VideoFrame> frame = VideoFrame::CreateFrame(
- VideoFrame::RGB32, natural_size, gfx::Rect(natural_size), natural_size,
+ VideoFrame::YV12, natural_size, gfx::Rect(natural_size), natural_size,
next_frame_timestamp_);
decode_results_.push_back(std::make_pair(
VideoDecoder::kOk, frame));
@@ -197,7 +197,7 @@ class VideoRendererBaseTest : public ::testing::Test {
void QueueEndOfStream() {
DCHECK_EQ(&message_loop_, base::MessageLoop::current());
decode_results_.push_back(std::make_pair(
- VideoDecoder::kOk, VideoFrame::CreateEmptyFrame()));
+ VideoDecoder::kOk, VideoFrame::CreateEOSFrame()));
}
void QueueDecodeError() {
@@ -293,7 +293,7 @@ class VideoRendererBaseTest : public ::testing::Test {
protected:
// Fixture members.
- scoped_ptr<VideoRendererBase> renderer_;
+ scoped_ptr<VideoRendererImpl> renderer_;
MockVideoDecoder* decoder_; // Owned by |renderer_|.
NiceMock<MockDemuxerStream> demuxer_stream_;
MockStatisticsCB statistics_cb_object_;
@@ -372,19 +372,19 @@ class VideoRendererBaseTest : public ::testing::Test {
std::deque<std::pair<
VideoDecoder::Status, scoped_refptr<VideoFrame> > > decode_results_;
- DISALLOW_COPY_AND_ASSIGN(VideoRendererBaseTest);
+ DISALLOW_COPY_AND_ASSIGN(VideoRendererImplTest);
};
-TEST_F(VideoRendererBaseTest, DoNothing) {
+TEST_F(VideoRendererImplTest, DoNothing) {
// Test that creation and deletion doesn't depend on calls to Initialize()
// and/or Stop().
}
-TEST_F(VideoRendererBaseTest, StopWithoutInitialize) {
+TEST_F(VideoRendererImplTest, StopWithoutInitialize) {
Stop();
}
-TEST_F(VideoRendererBaseTest, Initialize) {
+TEST_F(VideoRendererImplTest, Initialize) {
Initialize();
EXPECT_EQ(0, GetCurrentTimestampInMs());
Shutdown();
@@ -395,38 +395,38 @@ static void ExpectNotCalled(PipelineStatus) {
ADD_FAILURE() << "Expected callback not to be called\n" << stack.ToString();
}
-TEST_F(VideoRendererBaseTest, StopWhileInitializing) {
+TEST_F(VideoRendererImplTest, StopWhileInitializing) {
EXPECT_CALL(*decoder_, Initialize(_, _))
.WillOnce(RunCallback<1>(PIPELINE_OK));
CallInitialize(base::Bind(&ExpectNotCalled));
Stop();
- // ~VideoRendererBase() will CHECK() if we left anything initialized.
+ // ~VideoRendererImpl() will CHECK() if we left anything initialized.
}
-TEST_F(VideoRendererBaseTest, StopWhileFlushing) {
+TEST_F(VideoRendererImplTest, StopWhileFlushing) {
Initialize();
Pause();
renderer_->Flush(base::Bind(&ExpectNotCalled, PIPELINE_OK));
Stop();
- // ~VideoRendererBase() will CHECK() if we left anything initialized.
+ // ~VideoRendererImpl() will CHECK() if we left anything initialized.
}
-TEST_F(VideoRendererBaseTest, Play) {
+TEST_F(VideoRendererImplTest, Play) {
Initialize();
Play();
Shutdown();
}
-TEST_F(VideoRendererBaseTest, EndOfStream_DefaultFrameDuration) {
+TEST_F(VideoRendererImplTest, EndOfStream_DefaultFrameDuration) {
Initialize();
Play();
// Verify that the ended callback fires when the default last frame duration
// has elapsed.
int end_timestamp = kFrameDurationInMs * limits::kMaxVideoFrames +
- VideoRendererBase::kMaxLastFrameDuration().InMilliseconds();
+ VideoRendererImpl::kMaxLastFrameDuration().InMilliseconds();
EXPECT_LT(end_timestamp, kVideoDurationInMs);
QueueEndOfStream();
@@ -436,7 +436,7 @@ TEST_F(VideoRendererBaseTest, EndOfStream_DefaultFrameDuration) {
Shutdown();
}
-TEST_F(VideoRendererBaseTest, EndOfStream_ClipDuration) {
+TEST_F(VideoRendererImplTest, EndOfStream_ClipDuration) {
int duration = kVideoDurationInMs + kFrameDurationInMs / 2;
InitializeWithDuration(duration);
Play();
@@ -461,7 +461,7 @@ TEST_F(VideoRendererBaseTest, EndOfStream_ClipDuration) {
Shutdown();
}
-TEST_F(VideoRendererBaseTest, DecodeError_Playing) {
+TEST_F(VideoRendererImplTest, DecodeError_Playing) {
Initialize();
Play();
@@ -471,7 +471,7 @@ TEST_F(VideoRendererBaseTest, DecodeError_Playing) {
Shutdown();
}
-TEST_F(VideoRendererBaseTest, DecodeError_DuringPreroll) {
+TEST_F(VideoRendererImplTest, DecodeError_DuringPreroll) {
Initialize();
Pause();
Flush();
@@ -481,7 +481,7 @@ TEST_F(VideoRendererBaseTest, DecodeError_DuringPreroll) {
Shutdown();
}
-TEST_F(VideoRendererBaseTest, Preroll_Exact) {
+TEST_F(VideoRendererImplTest, Preroll_Exact) {
Initialize();
Pause();
Flush();
@@ -492,7 +492,7 @@ TEST_F(VideoRendererBaseTest, Preroll_Exact) {
Shutdown();
}
-TEST_F(VideoRendererBaseTest, Preroll_RightBefore) {
+TEST_F(VideoRendererImplTest, Preroll_RightBefore) {
Initialize();
Pause();
Flush();
@@ -503,7 +503,7 @@ TEST_F(VideoRendererBaseTest, Preroll_RightBefore) {
Shutdown();
}
-TEST_F(VideoRendererBaseTest, Preroll_RightAfter) {
+TEST_F(VideoRendererImplTest, Preroll_RightAfter) {
Initialize();
Pause();
Flush();
@@ -514,7 +514,7 @@ TEST_F(VideoRendererBaseTest, Preroll_RightAfter) {
Shutdown();
}
-TEST_F(VideoRendererBaseTest, PlayAfterPreroll) {
+TEST_F(VideoRendererImplTest, PlayAfterPreroll) {
Initialize();
Pause();
Flush();
@@ -530,20 +530,71 @@ TEST_F(VideoRendererBaseTest, PlayAfterPreroll) {
Shutdown();
}
-TEST_F(VideoRendererBaseTest, GetCurrentFrame_Initialized) {
+TEST_F(VideoRendererImplTest, Rebuffer) {
+ Initialize();
+
+ Play();
+
+ // Advance time past prerolled time drain the ready frame queue.
+ AdvanceTimeInMs(5 * kFrameDurationInMs);
+ WaitForPendingRead();
+
+ // Simulate a Pause/Preroll/Play rebuffer sequence.
+ Pause();
+
+ WaitableMessageLoopEvent event;
+ renderer_->Preroll(kNoTimestamp(),
+ event.GetPipelineStatusCB());
+
+ // Queue enough frames to satisfy preroll.
+ for (int i = 0; i < limits::kMaxVideoFrames; ++i)
+ QueueNextFrame();
+
+ SatisfyPendingRead();
+
+ event.RunAndWaitForStatus(PIPELINE_OK);
+
+ Play();
+
+ Shutdown();
+}
+
+TEST_F(VideoRendererImplTest, Rebuffer_AlreadyHaveEnoughFrames) {
+ Initialize();
+
+ // Queue an extra frame so that we'll have enough frames to satisfy
+ // preroll even after the first frame is painted.
+ QueueNextFrame();
+ Play();
+
+ // Simulate a Pause/Preroll/Play rebuffer sequence.
+ Pause();
+
+ WaitableMessageLoopEvent event;
+ renderer_->Preroll(kNoTimestamp(),
+ event.GetPipelineStatusCB());
+
+ event.RunAndWaitForStatus(PIPELINE_OK);
+
+ Play();
+
+ Shutdown();
+}
+
+TEST_F(VideoRendererImplTest, GetCurrentFrame_Initialized) {
Initialize();
EXPECT_TRUE(GetCurrentFrame().get()); // Due to prerolling.
Shutdown();
}
-TEST_F(VideoRendererBaseTest, GetCurrentFrame_Playing) {
+TEST_F(VideoRendererImplTest, GetCurrentFrame_Playing) {
Initialize();
Play();
EXPECT_TRUE(GetCurrentFrame().get());
Shutdown();
}
-TEST_F(VideoRendererBaseTest, GetCurrentFrame_Paused) {
+TEST_F(VideoRendererImplTest, GetCurrentFrame_Paused) {
Initialize();
Play();
Pause();
@@ -551,7 +602,7 @@ TEST_F(VideoRendererBaseTest, GetCurrentFrame_Paused) {
Shutdown();
}
-TEST_F(VideoRendererBaseTest, GetCurrentFrame_Flushed) {
+TEST_F(VideoRendererImplTest, GetCurrentFrame_Flushed) {
Initialize();
Play();
Pause();
@@ -564,7 +615,7 @@ TEST_F(VideoRendererBaseTest, GetCurrentFrame_Flushed) {
Shutdown();
}
-TEST_F(VideoRendererBaseTest, GetCurrentFrame_EndOfStream) {
+TEST_F(VideoRendererImplTest, GetCurrentFrame_EndOfStream) {
Initialize();
Play();
Pause();
@@ -585,7 +636,7 @@ TEST_F(VideoRendererBaseTest, GetCurrentFrame_EndOfStream) {
Shutdown();
}
-TEST_F(VideoRendererBaseTest, GetCurrentFrame_Shutdown) {
+TEST_F(VideoRendererImplTest, GetCurrentFrame_Shutdown) {
Initialize();
// Frame shouldn't be updated.
@@ -595,7 +646,7 @@ TEST_F(VideoRendererBaseTest, GetCurrentFrame_Shutdown) {
}
// Stop() is called immediately during an error.
-TEST_F(VideoRendererBaseTest, GetCurrentFrame_Error) {
+TEST_F(VideoRendererImplTest, GetCurrentFrame_Error) {
Initialize();
// Frame shouldn't be updated.
@@ -605,7 +656,7 @@ TEST_F(VideoRendererBaseTest, GetCurrentFrame_Error) {
}
// Verify that a late decoder response doesn't break invariants in the renderer.
-TEST_F(VideoRendererBaseTest, StopDuringOutstandingRead) {
+TEST_F(VideoRendererImplTest, StopDuringOutstandingRead) {
Initialize();
Play();
@@ -619,7 +670,7 @@ TEST_F(VideoRendererBaseTest, StopDuringOutstandingRead) {
event.RunAndWait();
}
-TEST_F(VideoRendererBaseTest, AbortPendingRead_Playing) {
+TEST_F(VideoRendererImplTest, AbortPendingRead_Playing) {
Initialize();
Play();
@@ -638,7 +689,7 @@ TEST_F(VideoRendererBaseTest, AbortPendingRead_Playing) {
Shutdown();
}
-TEST_F(VideoRendererBaseTest, AbortPendingRead_Flush) {
+TEST_F(VideoRendererImplTest, AbortPendingRead_Flush) {
Initialize();
Play();
@@ -651,7 +702,7 @@ TEST_F(VideoRendererBaseTest, AbortPendingRead_Flush) {
Shutdown();
}
-TEST_F(VideoRendererBaseTest, AbortPendingRead_Preroll) {
+TEST_F(VideoRendererImplTest, AbortPendingRead_Preroll) {
Initialize();
Pause();
Flush();
@@ -661,7 +712,7 @@ TEST_F(VideoRendererBaseTest, AbortPendingRead_Preroll) {
Shutdown();
}
-TEST_F(VideoRendererBaseTest, VideoDecoder_InitFailure) {
+TEST_F(VideoRendererImplTest, VideoDecoder_InitFailure) {
InSequence s;
EXPECT_CALL(*decoder_, Initialize(_, _))
diff --git a/chromium/media/filters/vpx_video_decoder.cc b/chromium/media/filters/vpx_video_decoder.cc
index 3c02d15f906..e270335504a 100644
--- a/chromium/media/filters/vpx_video_decoder.cc
+++ b/chromium/media/filters/vpx_video_decoder.cc
@@ -42,17 +42,25 @@ static const int kDecodeThreads = 2;
static const int kMaxDecodeThreads = 16;
// Returns the number of threads.
-static int GetThreadCount() {
- // TODO(scherkus): De-duplicate this function and the one used by
- // FFmpegVideoDecoder.
-
+static int GetThreadCount(const VideoDecoderConfig& config) {
// Refer to http://crbug.com/93932 for tsan suppressions on decoding.
int decode_threads = kDecodeThreads;
const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
std::string threads(cmd_line->GetSwitchValueASCII(switches::kVideoThreads));
- if (threads.empty() || !base::StringToInt(threads, &decode_threads))
+ if (threads.empty() || !base::StringToInt(threads, &decode_threads)) {
+ if (config.codec() == kCodecVP9) {
+ // For VP9 decode when using the default thread count, increase the number
+ // of decode threads to equal the maximum number of tiles possible for
+ // higher resolution streams.
+ if (config.coded_size().width() >= 2048)
+ decode_threads = 8;
+ else if (config.coded_size().width() >= 1024)
+ decode_threads = 4;
+ }
+
return decode_threads;
+ }
decode_threads = std::max(decode_threads, 0);
decode_threads = std::min(decode_threads, kMaxDecodeThreads);
@@ -100,7 +108,7 @@ static vpx_codec_ctx* InitializeVpxContext(vpx_codec_ctx* context,
vpx_codec_dec_cfg_t vpx_config = {0};
vpx_config.w = config.coded_size().width();
vpx_config.h = config.coded_size().height();
- vpx_config.threads = GetThreadCount();
+ vpx_config.threads = GetThreadCount(config);
vpx_codec_err_t status = vpx_codec_dec_init(context,
config.codec() == kCodecVP9 ?
@@ -172,7 +180,7 @@ void VpxVideoDecoder::Decode(const scoped_refptr<DecoderBuffer>& buffer,
// Return empty frames if decoding has finished.
if (state_ == kDecodeFinished) {
- base::ResetAndReturn(&decode_cb_).Run(kOk, VideoFrame::CreateEmptyFrame());
+ base::ResetAndReturn(&decode_cb_).Run(kOk, VideoFrame::CreateEOSFrame());
return;
}
@@ -224,7 +232,7 @@ void VpxVideoDecoder::DecodeBuffer(const scoped_refptr<DecoderBuffer>& buffer) {
// Transition to kDecodeFinished on the first end of stream buffer.
if (state_ == kNormal && buffer->end_of_stream()) {
state_ = kDecodeFinished;
- base::ResetAndReturn(&decode_cb_).Run(kOk, VideoFrame::CreateEmptyFrame());
+ base::ResetAndReturn(&decode_cb_).Run(kOk, VideoFrame::CreateEOSFrame());
return;
}
@@ -329,14 +337,12 @@ void VpxVideoDecoder::CopyVpxImageTo(const vpx_image* vpx_image,
const struct vpx_image* vpx_image_alpha,
scoped_refptr<VideoFrame>* video_frame) {
CHECK(vpx_image);
- CHECK_EQ(vpx_image->d_w % 2, 0U);
- CHECK_EQ(vpx_image->d_h % 2, 0U);
CHECK(vpx_image->fmt == VPX_IMG_FMT_I420 ||
vpx_image->fmt == VPX_IMG_FMT_YV12);
gfx::Size size(vpx_image->d_w, vpx_image->d_h);
- *video_frame = VideoFrame::CreateFrame(
+ *video_frame = frame_pool_.CreateFrame(
vpx_codec_alpha_ ? VideoFrame::YV12A : VideoFrame::YV12,
size,
gfx::Rect(size),
@@ -349,11 +355,11 @@ void VpxVideoDecoder::CopyVpxImageTo(const vpx_image* vpx_image,
video_frame->get());
CopyUPlane(vpx_image->planes[VPX_PLANE_U],
vpx_image->stride[VPX_PLANE_U],
- vpx_image->d_h / 2,
+ (vpx_image->d_h + 1) / 2,
video_frame->get());
CopyVPlane(vpx_image->planes[VPX_PLANE_V],
vpx_image->stride[VPX_PLANE_V],
- vpx_image->d_h / 2,
+ (vpx_image->d_h + 1) / 2,
video_frame->get());
if (!vpx_codec_alpha_)
return;
diff --git a/chromium/media/filters/vpx_video_decoder.h b/chromium/media/filters/vpx_video_decoder.h
index 680337ae532..cc02e89aad0 100644
--- a/chromium/media/filters/vpx_video_decoder.h
+++ b/chromium/media/filters/vpx_video_decoder.h
@@ -11,6 +11,7 @@
#include "media/base/video_decoder.h"
#include "media/base/video_decoder_config.h"
#include "media/base/video_frame.h"
+#include "media/base/video_frame_pool.h"
struct vpx_codec_ctx;
struct vpx_image;
@@ -80,6 +81,8 @@ class MEDIA_EXPORT VpxVideoDecoder : public VideoDecoder {
vpx_codec_ctx* vpx_codec_;
vpx_codec_ctx* vpx_codec_alpha_;
+ VideoFramePool frame_pool_;
+
DISALLOW_COPY_AND_ASSIGN(VpxVideoDecoder);
};
diff --git a/chromium/media/filters/webvtt_util.h b/chromium/media/filters/webvtt_util.h
new file mode 100644
index 00000000000..b71b66f7205
--- /dev/null
+++ b/chromium/media/filters/webvtt_util.h
@@ -0,0 +1,30 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_FILTERS_WEBVTT_UTIL_H_
+#define MEDIA_FILTERS_WEBVTT_UTIL_H_
+
+#include <vector>
+
+namespace media {
+
+// Utility function to create side data item for decoder buffer.
+template<typename T>
+void MakeSideData(T id_begin, T id_end,
+ T settings_begin, T settings_end,
+ std::vector<uint8>* side_data) {
+ // The DecoderBuffer only supports a single side data item. In the case of
+ // a WebVTT cue, we can have potentially two side data items. In order to
+ // avoid disrupting DecoderBuffer any more than we need to, we copy both
+ // side data items onto a single one, and terminate each with a NUL marker.
+ side_data->clear();
+ side_data->insert(side_data->end(), id_begin, id_end);
+ side_data->push_back(0);
+ side_data->insert(side_data->end(), settings_begin, settings_end);
+ side_data->push_back(0);
+}
+
+} // namespace media
+
+#endif // MEDIA_FILTERS_WEBVTT_UTIL_H_
diff --git a/chromium/media/media.gyp b/chromium/media/media.gyp
index 09deb821b22..97e2f5dfba9 100644
--- a/chromium/media/media.gyp
+++ b/chromium/media/media.gyp
@@ -22,12 +22,12 @@
'media_use_libvpx%': 1,
}],
# ALSA usage.
- ['OS=="linux" or OS=="freebsd" or OS=="solaris"', {
+ ['(OS=="linux" or OS=="freebsd" or OS=="solaris") and embedded!=1', {
'use_alsa%': 1,
}, {
'use_alsa%': 0,
}],
- ['os_posix==1 and OS!="mac" and OS!="android" and chromeos!=1', {
+ ['os_posix==1 and OS!="mac" and OS!="android" and chromeos!=1 and embedded!=1', {
'use_pulseaudio%': 1,
}, {
'use_pulseaudio%': 0,
@@ -49,7 +49,8 @@
'../gpu/gpu.gyp:command_buffer_common',
'../skia/skia.gyp:skia',
'../third_party/opus/opus.gyp:opus',
- '../ui/ui.gyp:ui',
+ '../ui/events/events.gyp:events_base',
+ '../ui/gfx/gfx.gyp:gfx',
'../url/url.gyp:url_lib',
'shared_memory_support',
],
@@ -61,8 +62,20 @@
],
'sources': [
'audio/agc_audio_stream.h',
+ 'audio/alsa/alsa_input.cc',
+ 'audio/alsa/alsa_input.h',
+ 'audio/alsa/alsa_output.cc',
+ 'audio/alsa/alsa_output.h',
+ 'audio/alsa/alsa_util.cc',
+ 'audio/alsa/alsa_util.h',
+ 'audio/alsa/alsa_wrapper.cc',
+ 'audio/alsa/alsa_wrapper.h',
+ 'audio/alsa/audio_manager_alsa.cc',
+ 'audio/alsa/audio_manager_alsa.h',
'audio/android/audio_manager_android.cc',
'audio/android/audio_manager_android.h',
+ 'audio/android/audio_record_input.cc',
+ 'audio/android/audio_record_input.h',
'audio/android/opensles_input.cc',
'audio/android/opensles_input.h',
'audio/android/opensles_output.cc',
@@ -102,8 +115,6 @@
'audio/audio_power_monitor.cc',
'audio/audio_power_monitor.h',
'audio/audio_source_diverter.h',
- 'audio/audio_util.cc',
- 'audio/audio_util.h',
'audio/clockless_audio_sink.cc',
'audio/clockless_audio_sink.h',
'audio/cras/audio_manager_cras.cc',
@@ -116,18 +127,13 @@
'audio/fake_audio_consumer.h',
'audio/fake_audio_input_stream.cc',
'audio/fake_audio_input_stream.h',
+ 'audio/fake_audio_log_factory.h',
+ 'audio/fake_audio_log_factory.cc',
+ 'audio/fake_audio_manager.cc',
+ 'audio/fake_audio_manager.h',
'audio/fake_audio_output_stream.cc',
'audio/fake_audio_output_stream.h',
- 'audio/linux/alsa_input.cc',
- 'audio/linux/alsa_input.h',
- 'audio/linux/alsa_output.cc',
- 'audio/linux/alsa_output.h',
- 'audio/linux/alsa_util.cc',
- 'audio/linux/alsa_util.h',
- 'audio/linux/alsa_wrapper.cc',
- 'audio/linux/alsa_wrapper.h',
'audio/linux/audio_manager_linux.cc',
- 'audio/linux/audio_manager_linux.h',
'audio/mac/aggregate_device_manager.cc',
'audio/mac/aggregate_device_manager.h',
'audio/mac/audio_auhal_mac.cc',
@@ -166,6 +172,12 @@
'audio/scoped_loop_observer.h',
'audio/simple_sources.cc',
'audio/simple_sources.h',
+ 'audio/sounds/audio_stream_handler.cc',
+ 'audio/sounds/audio_stream_handler.h',
+ 'audio/sounds/sounds_manager.cc',
+ 'audio/sounds/sounds_manager.h',
+ 'audio/sounds/wav_audio_handler.cc',
+ 'audio/sounds/wav_audio_handler.h',
'audio/virtual_audio_input_stream.cc',
'audio/virtual_audio_input_stream.h',
'audio/virtual_audio_output_stream.cc',
@@ -278,7 +290,6 @@
'base/multi_channel_resampler.h',
'base/pipeline.cc',
'base/pipeline.h',
- 'base/pipeline_status.cc',
'base/pipeline_status.h',
'base/ranges.cc',
'base/ranges.h',
@@ -303,7 +314,13 @@
'base/stream_parser.h',
'base/stream_parser_buffer.cc',
'base/stream_parser_buffer.h',
+ 'base/text_cue.cc',
+ 'base/text_cue.h',
+ 'base/text_renderer.cc',
+ 'base/text_renderer.h',
'base/text_track.h',
+ 'base/text_track_config.cc',
+ 'base/text_track_config.h',
'base/user_input_monitor.cc',
'base/user_input_monitor.h',
'base/user_input_monitor_linux.cc',
@@ -315,6 +332,8 @@
'base/video_decoder_config.h',
'base/video_frame.cc',
'base/video_frame.h',
+ 'base/video_frame_pool.cc',
+ 'base/video_frame_pool.h',
'base/video_renderer.cc',
'base/video_renderer.h',
'base/video_util.cc',
@@ -323,6 +342,10 @@
'base/yuv_convert.h',
'cdm/aes_decryptor.cc',
'cdm/aes_decryptor.h',
+ 'cdm/json_web_key.cc',
+ 'cdm/json_web_key.h',
+ 'cdm/key_system_names.cc',
+ 'cdm/key_system_names.h',
'ffmpeg/ffmpeg_common.cc',
'ffmpeg/ffmpeg_common.h',
'filters/audio_decoder_selector.cc',
@@ -375,26 +398,40 @@
'filters/video_decoder_selector.h',
'filters/video_frame_stream.cc',
'filters/video_frame_stream.h',
- 'filters/video_renderer_base.cc',
- 'filters/video_renderer_base.h',
+ 'filters/video_renderer_impl.cc',
+ 'filters/video_renderer_impl.h',
'filters/vpx_video_decoder.cc',
'filters/vpx_video_decoder.h',
+ 'filters/webvtt_util.h',
'filters/wsola_internals.cc',
'filters/wsola_internals.h',
'midi/midi_manager.cc',
'midi/midi_manager.h',
'midi/midi_manager_mac.cc',
'midi/midi_manager_mac.h',
+ 'midi/midi_message_queue.cc',
+ 'midi/midi_message_queue.h',
+ 'midi/midi_message_util.cc',
+ 'midi/midi_message_util.h',
+ 'midi/midi_manager_win.cc',
+ 'midi/midi_manager_win.h',
'midi/midi_port_info.cc',
'midi/midi_port_info.h',
- 'mp3/mp3_stream_parser.cc',
- 'mp3/mp3_stream_parser.h',
'video/capture/android/video_capture_device_android.cc',
'video/capture/android/video_capture_device_android.h',
'video/capture/fake_video_capture_device.cc',
'video/capture/fake_video_capture_device.h',
+ 'video/capture/file_video_capture_device.cc',
+ 'video/capture/file_video_capture_device.h',
'video/capture/linux/video_capture_device_linux.cc',
'video/capture/linux/video_capture_device_linux.h',
+ 'video/capture/mac/avfoundation_glue.h',
+ 'video/capture/mac/avfoundation_glue.mm',
+ 'video/capture/mac/coremedia_glue.h',
+ 'video/capture/mac/coremedia_glue.mm',
+ 'video/capture/mac/platform_video_capturing_mac.h',
+ 'video/capture/mac/video_capture_device_avfoundation_mac.h',
+ 'video/capture/mac/video_capture_device_avfoundation_mac.mm',
'video/capture/mac/video_capture_device_mac.h',
'video/capture/mac/video_capture_device_mac.mm',
'video/capture/mac/video_capture_device_qtkit_mac.h',
@@ -523,6 +560,10 @@
'base/media.h',
'base/media_stub.cc',
],
+ 'sources!': [
+ 'filters/opus_audio_decoder.cc',
+ 'filters/opus_audio_decoder.h',
+ ],
'conditions': [
['android_webview_build==0', {
'dependencies': [
@@ -553,9 +594,11 @@
'-lasound',
],
},
+ 'defines': [
+ 'USE_ALSA',
+ ],
}, { # use_alsa==0
- 'sources/': [ ['exclude', '/alsa_'],
- ['exclude', '/audio_manager_linux'] ],
+ 'sources/': [ ['exclude', '(^|/)alsa/'], ],
}],
['OS!="openbsd"', {
'sources!': [
@@ -674,7 +717,7 @@
'<@(_inputs)',
],
'process_outputs_as_sources': 1,
- 'message': 'Generating Pulse stubs for dynamic loading.',
+ 'message': 'Generating Pulse stubs for dynamic loading',
},
],
'conditions': [
@@ -712,12 +755,6 @@
'audio/pulse/pulse_util.h',
],
}],
- ['os_posix==1', {
- 'sources!': [
- 'video/capture/video_capture_device_dummy.cc',
- 'video/capture/video_capture_device_dummy.h',
- ],
- }],
['OS=="mac"', {
'link_settings': {
'libraries': [
@@ -732,10 +769,6 @@
},
}],
['OS=="win"', {
- 'sources!': [
- 'video/capture/video_capture_device_dummy.cc',
- 'video/capture/video_capture_device_dummy.h',
- ],
'link_settings': {
'libraries': [
'-lmf.lib',
@@ -774,7 +807,7 @@
}],
],
}],
- ['proprietary_codecs==1 or branding=="Chrome"', {
+ ['proprietary_codecs==1', {
'sources': [
'mp2t/es_parser.h',
'mp2t/es_parser_adts.cc',
@@ -795,6 +828,8 @@
'mp2t/ts_section_pmt.h',
'mp2t/ts_section_psi.cc',
'mp2t/ts_section_psi.h',
+ 'mp3/mp3_stream_parser.cc',
+ 'mp3/mp3_stream_parser.h',
'mp4/aac.cc',
'mp4/aac.h',
'mp4/avc.cc',
@@ -858,10 +893,12 @@
'../skia/skia.gyp:skia',
'../testing/gmock.gyp:gmock',
'../testing/gtest.gyp:gtest',
+ '../ui/gfx/gfx.gyp:gfx',
'../ui/ui.gyp:ui',
],
'sources': [
'audio/android/audio_android_unittest.cc',
+ 'audio/alsa/alsa_output_unittest.cc',
'audio/audio_input_controller_unittest.cc',
'audio/audio_input_unittest.cc',
'audio/audio_input_volume_unittest.cc',
@@ -873,11 +910,15 @@
'audio/audio_parameters_unittest.cc',
'audio/audio_power_monitor_unittest.cc',
'audio/fake_audio_consumer_unittest.cc',
- 'audio/linux/alsa_output_unittest.cc',
'audio/mac/audio_auhal_mac_unittest.cc',
'audio/mac/audio_device_listener_mac_unittest.cc',
'audio/mac/audio_low_latency_input_mac_unittest.cc',
'audio/simple_sources_unittest.cc',
+ 'audio/sounds/audio_stream_handler_unittest.cc',
+ 'audio/sounds/sounds_manager_unittest.cc',
+ 'audio/sounds/test_data.cc',
+ 'audio/sounds/test_data.h',
+ 'audio/sounds/wav_audio_handler_unittest.cc',
'audio/virtual_audio_input_stream_unittest.cc',
'audio/virtual_audio_output_stream_unittest.cc',
'audio/win/audio_device_listener_win_unittest.cc',
@@ -923,13 +964,16 @@
'base/sinc_resampler_unittest.cc',
'base/test_data_util.cc',
'base/test_data_util.h',
+ 'base/text_renderer_unittest.cc',
'base/user_input_monitor_unittest.cc',
'base/vector_math_testing.h',
'base/vector_math_unittest.cc',
'base/video_frame_unittest.cc',
+ 'base/video_frame_pool_unittest.cc',
'base/video_util_unittest.cc',
'base/yuv_convert_unittest.cc',
'cdm/aes_decryptor_unittest.cc',
+ 'cdm/json_web_key_unittest.cc',
'ffmpeg/ffmpeg_common_unittest.cc',
'filters/audio_decoder_selector_unittest.cc',
'filters/audio_file_reader_unittest.cc',
@@ -959,7 +1003,9 @@
'filters/source_buffer_stream_unittest.cc',
'filters/video_decoder_selector_unittest.cc',
'filters/video_frame_stream_unittest.cc',
- 'filters/video_renderer_base_unittest.cc',
+ 'filters/video_renderer_impl_unittest.cc',
+ 'midi/midi_message_queue_unittest.cc',
+ 'midi/midi_message_util_unittest.cc',
'video/capture/video_capture_device_unittest.cc',
'webm/cluster_builder.cc',
'webm/cluster_builder.h',
@@ -1012,7 +1058,6 @@
'ffmpeg/ffmpeg_common_unittest.cc',
'filters/audio_file_reader_unittest.cc',
'filters/blocking_url_protocol_unittest.cc',
- 'filters/chunk_demuxer_unittest.cc',
'filters/ffmpeg_audio_decoder_unittest.cc',
'filters/ffmpeg_demuxer_unittest.cc',
'filters/ffmpeg_glue_unittest.cc',
@@ -1020,10 +1065,6 @@
'filters/ffmpeg_video_decoder_unittest.cc',
'filters/pipeline_integration_test.cc',
'filters/pipeline_integration_test_base.cc',
- 'mp2t/mp2t_stream_parser_unittest.cc',
- 'mp3/mp3_stream_parser_unittest.cc',
- 'mp4/mp4_stream_parser_unittest.cc',
- 'webm/webm_cluster_parser_unittest.cc',
],
'conditions': [
['gtest_target_type=="shared_library"', {
@@ -1049,7 +1090,7 @@
}],
['use_alsa==0', {
'sources!': [
- 'audio/linux/alsa_output_unittest.cc',
+ 'audio/alsa/alsa_output_unittest.cc',
'audio/audio_low_latency_input_output_unittest.cc',
],
}],
@@ -1079,10 +1120,65 @@
],
},
{
+ 'target_name': 'media_perftests',
+ 'type': '<(gtest_target_type)',
+ 'dependencies': [
+ 'media',
+ 'media_test_support',
+ 'shared_memory_support',
+ '../base/base.gyp:test_support_base',
+ '../testing/gmock.gyp:gmock',
+ '../testing/gtest.gyp:gtest',
+ '../testing/perf/perf_test.gyp:perf_test',
+ '../ui/gfx/gfx.gyp:gfx',
+ '../ui/gl/gl.gyp:gl',
+ '../ui/ui.gyp:ui',
+ ],
+ 'sources': [
+ 'base/audio_bus_perftest.cc',
+ 'base/audio_converter_perftest.cc',
+ 'base/demuxer_perftest.cc',
+ 'base/run_all_unittests.cc',
+ 'base/sinc_resampler_perftest.cc',
+ 'base/test_data_util.cc',
+ 'base/vector_math_perftest.cc',
+ 'filters/pipeline_integration_perftest.cc',
+ 'filters/pipeline_integration_test_base.cc',
+ ],
+ 'conditions': [
+ ['arm_neon==1', {
+ 'defines': [
+ 'USE_NEON'
+ ],
+ }],
+ ['OS=="android"', {
+ 'conditions': [
+ ['gtest_target_type=="shared_library"', {
+ 'dependencies': [
+ '../testing/android/native_test.gyp:native_test_native_code',
+ ],
+ }],
+ ],
+ }],
+ ['media_use_ffmpeg==1', {
+ 'dependencies': [
+ '../third_party/ffmpeg/ffmpeg.gyp:ffmpeg',
+ ],
+ }, { # media_use_ffmpeg==0
+ 'sources!': [
+ 'base/demuxer_perftest.cc',
+ 'filters/pipeline_integration_perftest.cc',
+ 'filters/pipeline_integration_test_base.cc',
+ ],
+ }],
+ ],
+ },
+ {
'target_name': 'media_test_support',
'type': 'static_library',
'dependencies': [
'media',
+ 'shared_memory_support',
'../base/base.gyp:base',
'../skia/skia.gyp:skia',
'../testing/gmock.gyp:gmock',
@@ -1095,6 +1191,10 @@
'audio/test_audio_input_controller_factory.h',
'base/fake_audio_render_callback.cc',
'base/fake_audio_render_callback.h',
+ 'base/fake_audio_renderer_sink.cc',
+ 'base/fake_audio_renderer_sink.h',
+ 'base/fake_text_track_stream.cc',
+ 'base/fake_text_track_stream.h',
'base/gmock_callback_support.h',
'base/mock_audio_renderer_sink.cc',
'base/mock_audio_renderer_sink.h',
@@ -1146,19 +1246,6 @@
}],
],
},
- {
- 'target_name': 'demuxer_bench',
- 'type': 'executable',
- 'dependencies': [
- 'media',
- '../base/base.gyp:base',
- ],
- 'sources': [
- 'tools/demuxer_bench/demuxer_bench.cc',
- ],
- # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
- 'msvs_disabled_warnings': [ 4267, ],
- },
],
'conditions': [
['target_arch!="arm"', {
@@ -1325,7 +1412,7 @@
'media',
'../base/base.gyp:base',
'../ui/gl/gl.gyp:gl',
- '../ui/ui.gyp:ui',
+ '../ui/gfx/gfx.gyp:gfx',
],
'link_settings': {
'libraries': [
@@ -1373,6 +1460,19 @@
},
'includes': ['../build/apk_test.gypi'],
},
+ {
+ 'target_name': 'media_perftests_apk',
+ 'type': 'none',
+ 'dependencies': [
+ 'media_java',
+ 'media_perftests',
+ ],
+ 'variables': {
+ 'test_suite_name': 'media_perftests',
+ 'input_shlib_path': '<(SHARED_LIB_DIR)/<(SHARED_LIB_PREFIX)media_perftests<(SHARED_LIB_SUFFIX)',
+ },
+ 'includes': ['../build/apk_test.gypi'],
+ },
],
}],
['OS=="android"', {
@@ -1382,6 +1482,7 @@
'type': 'none',
'sources': [
'base/android/java/src/org/chromium/media/AudioManagerAndroid.java',
+ 'base/android/java/src/org/chromium/media/AudioRecordInput.java',
'base/android/java/src/org/chromium/media/MediaCodecBridge.java',
'base/android/java/src/org/chromium/media/MediaDrmBridge.java',
'base/android/java/src/org/chromium/media/MediaPlayerBridge.java',
@@ -1390,6 +1491,7 @@
],
'variables': {
'jni_gen_package': 'media',
+ 'jni_generator_ptr_type': 'long',
},
'includes': ['../build/jni_generator.gypi'],
},
@@ -1401,6 +1503,7 @@
],
'variables': {
'jni_gen_package': 'media',
+ 'jni_generator_ptr_type': 'long',
},
'includes': ['../build/jni_generator.gypi'],
},
@@ -1471,7 +1574,7 @@
'template_deps': ['video/capture/android/imageformat_list.h'],
},
'includes': [ '../build/android/java_cpp_template.gypi' ],
- },
+ },
],
}],
['media_use_ffmpeg==1', {
diff --git a/chromium/media/media_cdm.gypi b/chromium/media/media_cdm.gypi
index 2f15fac524c..d495e3437d4 100644
--- a/chromium/media/media_cdm.gypi
+++ b/chromium/media/media_cdm.gypi
@@ -83,6 +83,7 @@
'cdm/ppapi/cdm_video_decoder.h',
'cdm/ppapi/clear_key_cdm.cc',
'cdm/ppapi/clear_key_cdm.h',
+ 'cdm/ppapi/clear_key_cdm_common.h',
],
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [ 4267, ],
@@ -98,14 +99,21 @@
],
'sources': [
'cdm/ppapi/api/content_decryption_module.h',
- 'cdm/ppapi/cdm_wrapper.cc',
+ 'cdm/ppapi/cdm_adapter.cc',
+ 'cdm/ppapi/cdm_adapter.h',
+ 'cdm/ppapi/cdm_helpers.cc',
+ 'cdm/ppapi/cdm_helpers.h',
+ 'cdm/ppapi/cdm_logging.cc',
+ 'cdm/ppapi/cdm_logging.h',
+ 'cdm/ppapi/cdm_wrapper.h',
'cdm/ppapi/linked_ptr.h',
+ 'cdm/ppapi/supported_cdm_versions.h',
],
'conditions': [
['os_posix == 1 and OS != "mac" and enable_pepper_cdms==1', {
'cflags': ['-fvisibility=hidden'],
'type': 'loadable_module',
- # Allow the plugin wrapper to find the CDM in the same directory.
+ # Allow the plugin adapter to find the CDM in the same directory.
'ldflags': ['-Wl,-rpath=\$$ORIGIN'],
'libraries': [
# Built by clearkeycdm.
diff --git a/chromium/media/media_perftests.isolate b/chromium/media/media_perftests.isolate
new file mode 100644
index 00000000000..0aa38c0eb60
--- /dev/null
+++ b/chromium/media/media_perftests.isolate
@@ -0,0 +1,14 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'conditions': [
+ ['OS=="android"', {
+ 'variables': {
+ 'isolate_dependency_untracked': [
+ 'test/data/',
+ ],
+ },
+ }],
+ ],
+}
diff --git a/chromium/media/midi/midi_manager.cc b/chromium/media/midi/midi_manager.cc
index b3262e4a034..6d3f1d30b95 100644
--- a/chromium/media/midi/midi_manager.cc
+++ b/chromium/media/midi/midi_manager.cc
@@ -6,12 +6,10 @@
#include "base/bind.h"
#include "base/bind_helpers.h"
-#include "base/message_loop/message_loop.h"
-#include "base/threading/thread.h"
namespace media {
-#if !defined(OS_MACOSX)
+#if !defined(OS_MACOSX) && !defined(OS_WIN)
// TODO(crogers): implement MIDIManager for other platforms.
MIDIManager* MIDIManager::Create() {
return NULL;
@@ -22,7 +20,8 @@ MIDIManager::MIDIManager()
: initialized_(false) {
}
-MIDIManager::~MIDIManager() {}
+MIDIManager::~MIDIManager() {
+}
bool MIDIManager::StartSession(MIDIManagerClient* client) {
// Lazily initialize the MIDI back-end.
@@ -63,25 +62,4 @@ void MIDIManager::ReceiveMIDIData(
(*i)->ReceiveMIDIData(port_index, data, length, timestamp);
}
-bool MIDIManager::CurrentlyOnMIDISendThread() {
- return send_thread_->message_loop() == base::MessageLoop::current();
-}
-
-void MIDIManager::DispatchSendMIDIData(MIDIManagerClient* client,
- uint32 port_index,
- const std::vector<uint8>& data,
- double timestamp) {
- // Lazily create the thread when first needed.
- if (!send_thread_) {
- send_thread_.reset(new base::Thread("MIDISendThread"));
- send_thread_->Start();
- send_message_loop_ = send_thread_->message_loop_proxy();
- }
-
- send_message_loop_->PostTask(
- FROM_HERE,
- base::Bind(&MIDIManager::SendMIDIData, base::Unretained(this),
- client, port_index, data, timestamp));
-}
-
} // namespace media
diff --git a/chromium/media/midi/midi_manager.h b/chromium/media/midi/midi_manager.h
index 6a301a942d9..f42a40de769 100644
--- a/chromium/media/midi/midi_manager.h
+++ b/chromium/media/midi/midi_manager.h
@@ -9,16 +9,10 @@
#include <vector>
#include "base/basictypes.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/message_loop/message_loop_proxy.h"
#include "base/synchronization/lock.h"
#include "media/base/media_export.h"
#include "media/midi/midi_port_info.h"
-namespace base {
-class Thread;
-}
-
namespace media {
// A MIDIManagerClient registers with the MIDIManager to receive MIDI data.
@@ -63,17 +57,18 @@ class MEDIA_EXPORT MIDIManager {
// A client calls ReleaseSession() to stop receiving MIDI data.
void EndSession(MIDIManagerClient* client);
- // DispatchSendMIDIData() schedules one or more messages to be sent
- // at the given time on a dedicated thread.
+ // DispatchSendMIDIData() is called when MIDI data should be sent to the MIDI
+ // system.
+ // This method is supposed to return immediately and should not block.
// |port_index| represents the specific output port from output_ports().
// |data| represents a series of bytes encoding one or more MIDI messages.
// |length| is the number of bytes in |data|.
// |timestamp| is the time to send the data, in seconds. A value of 0
// means send "now" or as soon as possible.
- void DispatchSendMIDIData(MIDIManagerClient* client,
- uint32 port_index,
- const std::vector<uint8>& data,
- double timestamp);
+ virtual void DispatchSendMIDIData(MIDIManagerClient* client,
+ uint32 port_index,
+ const std::vector<uint8>& data,
+ double timestamp) = 0;
// input_ports() is a list of MIDI ports for receiving MIDI data.
// Each individual port in this list can be identified by its
@@ -89,13 +84,6 @@ class MEDIA_EXPORT MIDIManager {
// Initializes the MIDI system, returning |true| on success.
virtual bool Initialize() = 0;
- // Implements the platform-specific details of sending MIDI data.
- // This function runs on MIDISendThread.
- virtual void SendMIDIData(MIDIManagerClient* client,
- uint32 port_index,
- const std::vector<uint8>& data,
- double timestamp) = 0;
-
void AddInputPort(const MIDIPortInfo& info);
void AddOutputPort(const MIDIPortInfo& info);
@@ -105,9 +93,6 @@ class MEDIA_EXPORT MIDIManager {
size_t length,
double timestamp);
- // Checks if current thread is MIDISendThread.
- bool CurrentlyOnMIDISendThread();
-
bool initialized_;
// Keeps track of all clients who wish to receive MIDI data.
@@ -120,11 +105,6 @@ class MEDIA_EXPORT MIDIManager {
MIDIPortInfoList input_ports_;
MIDIPortInfoList output_ports_;
- // |send_thread_| is used to send MIDI data by calling the platform-specific
- // API.
- scoped_ptr<base::Thread> send_thread_;
- scoped_refptr<base::MessageLoopProxy> send_message_loop_;
-
DISALLOW_COPY_AND_ASSIGN(MIDIManager);
};
diff --git a/chromium/media/midi/midi_manager_mac.cc b/chromium/media/midi/midi_manager_mac.cc
index 4477944e773..a36d1debe13 100644
--- a/chromium/media/midi/midi_manager_mac.cc
+++ b/chromium/media/midi/midi_manager_mac.cc
@@ -4,12 +4,13 @@
#include "media/midi/midi_manager_mac.h"
-#include <iostream>
#include <string>
#include "base/debug/trace_event.h"
+#include "base/message_loop/message_loop.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/sys_string_conversions.h"
+
#include <CoreAudio/HostTime.h>
using base::IntToString;
@@ -31,7 +32,8 @@ MIDIManagerMac::MIDIManagerMac()
coremidi_input_(0),
coremidi_output_(0),
packet_list_(NULL),
- midi_packet_(NULL) {
+ midi_packet_(NULL),
+ send_thread_("MIDISendThread") {
}
bool MIDIManagerMac::Initialize() {
@@ -103,13 +105,31 @@ bool MIDIManagerMac::Initialize() {
return true;
}
+void MIDIManagerMac::DispatchSendMIDIData(MIDIManagerClient* client,
+ uint32 port_index,
+ const std::vector<uint8>& data,
+ double timestamp) {
+ if (!send_thread_.IsRunning())
+ send_thread_.Start();
+
+ // OK to use base::Unretained(this) since we join to thread in dtor().
+ send_thread_.message_loop()->PostTask(
+ FROM_HERE,
+ base::Bind(&MIDIManagerMac::SendMIDIData, base::Unretained(this),
+ client, port_index, data, timestamp));
+}
+
MIDIManagerMac::~MIDIManagerMac() {
+ // Wait for the termination of |send_thread_| before disposing MIDI ports.
+ send_thread_.Stop();
+
if (coremidi_input_)
MIDIPortDispose(coremidi_input_);
if (coremidi_output_)
MIDIPortDispose(coremidi_output_);
}
+// static
void MIDIManagerMac::ReadMIDIDispatch(const MIDIPacketList* packet_list,
void* read_proc_refcon,
void* src_conn_refcon) {
@@ -133,7 +153,7 @@ void MIDIManagerMac::ReadMIDI(MIDIEndpointRef source,
uint32 port_index = source_map_[source];
// Go through each packet and process separately.
- for(size_t i = 0; i < packet_list->numPackets; i++) {
+ for (size_t i = 0; i < packet_list->numPackets; i++) {
// Each packet contains MIDI data for one or more messages (like note-on).
const MIDIPacket &packet = packet_list->packet[i];
double timestamp_seconds = MIDITimeStampToSeconds(packet.timeStamp);
@@ -150,7 +170,7 @@ void MIDIManagerMac::SendMIDIData(MIDIManagerClient* client,
uint32 port_index,
const std::vector<uint8>& data,
double timestamp) {
- DCHECK(CurrentlyOnMIDISendThread());
+ DCHECK(send_thread_.message_loop_proxy()->BelongsToCurrentThread());
// System Exclusive has already been filtered.
MIDITimeStamp coremidi_timestamp = SecondsToMIDITimeStamp(timestamp);
@@ -177,34 +197,57 @@ void MIDIManagerMac::SendMIDIData(MIDIManagerClient* client,
client->AccumulateMIDIBytesSent(data.size());
}
+// static
MIDIPortInfo MIDIManagerMac::GetPortInfoFromEndpoint(
MIDIEndpointRef endpoint) {
SInt32 id_number = 0;
MIDIObjectGetIntegerProperty(endpoint, kMIDIPropertyUniqueID, &id_number);
string id = IntToString(id_number);
+ string manufacturer;
CFStringRef manufacturer_ref = NULL;
- MIDIObjectGetStringProperty(
+ OSStatus result = MIDIObjectGetStringProperty(
endpoint, kMIDIPropertyManufacturer, &manufacturer_ref);
- string manufacturer = SysCFStringRefToUTF8(manufacturer_ref);
+ if (result == noErr) {
+ manufacturer = SysCFStringRefToUTF8(manufacturer_ref);
+ } else {
+ // kMIDIPropertyManufacturer is not supported in IAC driver providing
+ // endpoints, and the result will be kMIDIUnknownProperty (-10835).
+ DLOG(WARNING) << "Failed to get kMIDIPropertyManufacturer with status "
+ << result;
+ }
+ string name;
CFStringRef name_ref = NULL;
- MIDIObjectGetStringProperty(endpoint, kMIDIPropertyName, &name_ref);
- string name = SysCFStringRefToUTF8(name_ref);
+ result = MIDIObjectGetStringProperty(endpoint, kMIDIPropertyName, &name_ref);
+ if (result == noErr)
+ name = SysCFStringRefToUTF8(name_ref);
+ else
+ DLOG(WARNING) << "Failed to get kMIDIPropertyName with status " << result;
+ string version;
SInt32 version_number = 0;
- MIDIObjectGetIntegerProperty(
+ result = MIDIObjectGetIntegerProperty(
endpoint, kMIDIPropertyDriverVersion, &version_number);
- string version = IntToString(version_number);
+ if (result == noErr) {
+ version = IntToString(version_number);
+ } else {
+ // kMIDIPropertyDriverVersion is not supported in IAC driver providing
+ // endpoints, and the result will be kMIDIUnknownProperty (-10835).
+ DLOG(WARNING) << "Failed to get kMIDIPropertyDriverVersion with status "
+ << result;
+ }
return MIDIPortInfo(id, manufacturer, name, version);
}
+// static
double MIDIManagerMac::MIDITimeStampToSeconds(MIDITimeStamp timestamp) {
UInt64 nanoseconds = AudioConvertHostTimeToNanos(timestamp);
return static_cast<double>(nanoseconds) / 1.0e9;
}
+// static
MIDITimeStamp MIDIManagerMac::SecondsToMIDITimeStamp(double seconds) {
UInt64 nanos = UInt64(seconds * 1.0e9);
return AudioConvertNanosToHostTime(nanos);
diff --git a/chromium/media/midi/midi_manager_mac.h b/chromium/media/midi/midi_manager_mac.h
index 2397b8034f7..cc8bf74a3c5 100644
--- a/chromium/media/midi/midi_manager_mac.h
+++ b/chromium/media/midi/midi_manager_mac.h
@@ -12,6 +12,7 @@
#include "base/basictypes.h"
#include "base/compiler_specific.h"
+#include "base/threading/thread.h"
#include "media/midi/midi_manager.h"
#include "media/midi/midi_port_info.h"
@@ -24,10 +25,10 @@ class MEDIA_EXPORT MIDIManagerMac : public MIDIManager {
// MIDIManager implementation.
virtual bool Initialize() OVERRIDE;
- virtual void SendMIDIData(MIDIManagerClient* client,
- uint32 port_index,
- const std::vector<uint8>& data,
- double timestamp) OVERRIDE;
+ virtual void DispatchSendMIDIData(MIDIManagerClient* client,
+ uint32 port_index,
+ const std::vector<uint8>& data,
+ double timestamp) OVERRIDE;
private:
// CoreMIDI callback for MIDI data.
@@ -39,6 +40,12 @@ class MEDIA_EXPORT MIDIManagerMac : public MIDIManager {
void *src_conn_refcon);
virtual void ReadMIDI(MIDIEndpointRef source, const MIDIPacketList *pktlist);
+ // An internal callback that runs on MIDISendThread.
+ void SendMIDIData(MIDIManagerClient* client,
+ uint32 port_index,
+ const std::vector<uint8>& data,
+ double timestamp);
+
// Helper
static media::MIDIPortInfo GetPortInfoFromEndpoint(MIDIEndpointRef endpoint);
static double MIDITimeStampToSeconds(MIDITimeStamp timestamp);
@@ -62,6 +69,9 @@ class MEDIA_EXPORT MIDIManagerMac : public MIDIManager {
// Keeps track of all destinations.
std::vector<MIDIEndpointRef> destinations_;
+ // |send_thread_| is used to send MIDI data.
+ base::Thread send_thread_;
+
DISALLOW_COPY_AND_ASSIGN(MIDIManagerMac);
};
diff --git a/chromium/media/midi/midi_manager_win.cc b/chromium/media/midi/midi_manager_win.cc
new file mode 100644
index 00000000000..d250e6aefff
--- /dev/null
+++ b/chromium/media/midi/midi_manager_win.cc
@@ -0,0 +1,597 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/midi/midi_manager_win.h"
+
+#include <windows.h>
+
+// Prevent unnecessary functions from being included from <mmsystem.h>
+#define MMNODRV
+#define MMNOSOUND
+#define MMNOWAVE
+#define MMNOAUX
+#define MMNOMIXER
+#define MMNOTIMER
+#define MMNOJOY
+#define MMNOMCI
+#define MMNOMMIO
+#include <mmsystem.h>
+
+#include "base/bind.h"
+#include "base/message_loop/message_loop.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/threading/thread.h"
+#include "media/midi/midi_message_queue.h"
+#include "media/midi/midi_message_util.h"
+#include "media/midi/midi_port_info.h"
+
+namespace media {
+namespace {
+
+std::string GetInErrorMessage(MMRESULT result) {
+ wchar_t text[MAXERRORLENGTH];
+ MMRESULT get_result = midiInGetErrorText(result, text, arraysize(text));
+ if (get_result != MMSYSERR_NOERROR) {
+ DLOG(ERROR) << "Failed to get error message."
+ << " original error: " << result
+ << " midiInGetErrorText error: " << get_result;
+ return std::string();
+ }
+ return WideToUTF8(text);
+}
+
+std::string GetOutErrorMessage(MMRESULT result) {
+ wchar_t text[MAXERRORLENGTH];
+ MMRESULT get_result = midiOutGetErrorText(result, text, arraysize(text));
+ if (get_result != MMSYSERR_NOERROR) {
+ DLOG(ERROR) << "Failed to get error message."
+ << " original error: " << result
+ << " midiOutGetErrorText error: " << get_result;
+ return std::string();
+ }
+ return WideToUTF8(text);
+}
+
+class MIDIHDRDeleter {
+ public:
+ void operator()(MIDIHDR* header) {
+ if (!header)
+ return;
+ delete[] static_cast<char*>(header->lpData);
+ header->lpData = NULL;
+ header->dwBufferLength = 0;
+ delete header;
+ }
+};
+
+typedef scoped_ptr<MIDIHDR, MIDIHDRDeleter> ScopedMIDIHDR;
+
+ScopedMIDIHDR CreateMIDIHDR(size_t size) {
+ ScopedMIDIHDR header(new MIDIHDR);
+ ZeroMemory(header.get(), sizeof(*header));
+ header->lpData = new char[size];
+ header->dwBufferLength = size;
+ return header.Pass();
+}
+
+void SendShortMIDIMessageInternal(HMIDIOUT midi_out_handle,
+ const std::vector<uint8>& message) {
+ if (message.size() >= 4)
+ return;
+
+ DWORD packed_message = 0;
+ for (size_t i = 0; i < message.size(); ++i)
+ packed_message |= (static_cast<uint32>(message[i]) << (i * 8));
+ MMRESULT result = midiOutShortMsg(midi_out_handle, packed_message);
+ DLOG_IF(ERROR, result != MMSYSERR_NOERROR)
+ << "Failed to output short message: " << GetOutErrorMessage(result);
+}
+
+void SendLongMIDIMessageInternal(HMIDIOUT midi_out_handle,
+ const std::vector<uint8>& message) {
+ // Implementation note:
+ // Sending long MIDI message can be performed synchronously or asynchronously
+ // depending on the driver. There are 2 options to support both cases:
+ // 1) Call midiOutLongMsg() API and wait for its completion within this
+ // function. In this approach, we can avoid memory copy by directly pointing
+ // |message| as the data buffer to be sent.
+ // 2) Allocate a buffer and copy |message| to it, then call midiOutLongMsg()
+  // API. The buffer will be freed in the MOM_DONE event handler, which tells
+ // us that the task of midiOutLongMsg() API is completed.
+ // Here we choose option 2) in favor of asynchronous design.
+
+ // Note for built-in USB-MIDI driver:
+ // From an observation on Windows 7/8.1 with a USB-MIDI keyboard,
+ // midiOutLongMsg() will be always blocked. Sending 64 bytes or less data
+ // takes roughly 300 usecs. Sending 2048 bytes or more data takes roughly
+ // |message.size() / (75 * 1024)| secs in practice. Here we put 60 KB size
+ // limit on SysEx message, with hoping that midiOutLongMsg will be blocked at
+ // most 1 sec or so with a typical USB-MIDI device.
+ const size_t kSysExSizeLimit = 60 * 1024;
+ if (message.size() >= kSysExSizeLimit) {
+ DVLOG(1) << "Ingnoreing SysEx message due to the size limit"
+ << ", size = " << message.size();
+ return;
+ }
+
+ ScopedMIDIHDR midi_header(CreateMIDIHDR(message.size()));
+ for (size_t i = 0; i < message.size(); ++i)
+ midi_header->lpData[i] = static_cast<char>(message[i]);
+
+ MMRESULT result = midiOutPrepareHeader(
+ midi_out_handle, midi_header.get(), sizeof(*midi_header));
+ if (result != MMSYSERR_NOERROR) {
+ DLOG(ERROR) << "Failed to prepare output buffer: "
+ << GetOutErrorMessage(result);
+ return;
+ }
+
+ result = midiOutLongMsg(
+ midi_out_handle, midi_header.get(), sizeof(*midi_header));
+ if (result != MMSYSERR_NOERROR) {
+ DLOG(ERROR) << "Failed to output long message: "
+ << GetOutErrorMessage(result);
+ result = midiOutUnprepareHeader(
+ midi_out_handle, midi_header.get(), sizeof(*midi_header));
+ DLOG_IF(ERROR, result != MMSYSERR_NOERROR)
+ << "Failed to uninitialize output buffer: "
+ << GetOutErrorMessage(result);
+ return;
+ }
+
+ // The ownership of |midi_header| is moved to MOM_DONE event handler.
+ midi_header.release();
+}
+
+} // namespace
+
+class MIDIManagerWin::InDeviceInfo {
+ public:
+ ~InDeviceInfo() {
+ Uninitialize();
+ }
+ void set_port_index(int index) {
+ port_index_ = index;
+ }
+ int port_index() const {
+ return port_index_;
+ }
+ bool device_to_be_closed() const {
+ return device_to_be_closed_;
+ }
+ HMIDIIN midi_handle() const {
+ return midi_handle_;
+ }
+ const base::TimeDelta& start_time_offset() const {
+ return start_time_offset_;
+ }
+
+ static scoped_ptr<InDeviceInfo> Create(MIDIManagerWin* manager,
+ UINT device_id) {
+ scoped_ptr<InDeviceInfo> obj(new InDeviceInfo(manager));
+ if (!obj->Initialize(device_id))
+ obj.reset();
+ return obj.Pass();
+ }
+
+ private:
+ static const int kInvalidPortIndex = -1;
+ static const size_t kBufferLength = 32 * 1024;
+
+ explicit InDeviceInfo(MIDIManagerWin* manager)
+ : manager_(manager),
+ port_index_(kInvalidPortIndex),
+ midi_handle_(NULL),
+ started_(false),
+ device_to_be_closed_(false) {
+ }
+
+ bool Initialize(DWORD device_id) {
+ Uninitialize();
+ midi_header_ = CreateMIDIHDR(kBufferLength);
+
+ // Here we use |CALLBACK_FUNCTION| to subscribe MIM_DATA, MIM_LONGDATA, and
+ // MIM_CLOSE events.
+ // - MIM_DATA: This is the only way to get a short MIDI message with
+ // timestamp information.
+ // - MIM_LONGDATA: This is the only way to get a long MIDI message with
+ // timestamp information.
+ // - MIM_CLOSE: This event is sent when 1) midiInClose() is called, or 2)
+ // the MIDI device becomes unavailable for some reasons, e.g., the cable
+  //    is disconnected. As for the former case, HMIDIIN will be invalidated
+  //    soon after the callback is finished. As for the latter case, however,
+  //    HMIDIIN continues to be valid until midiInClose() is called.
+ MMRESULT result = midiInOpen(&midi_handle_,
+ device_id,
+ reinterpret_cast<DWORD_PTR>(&HandleMessage),
+ reinterpret_cast<DWORD_PTR>(this),
+ CALLBACK_FUNCTION);
+ if (result != MMSYSERR_NOERROR) {
+ DLOG(ERROR) << "Failed to open output device. "
+ << " id: " << device_id
+ << " message: " << GetInErrorMessage(result);
+ return false;
+ }
+ result = midiInPrepareHeader(
+ midi_handle_, midi_header_.get(), sizeof(*midi_header_));
+ if (result != MMSYSERR_NOERROR) {
+ DLOG(ERROR) << "Failed to initialize input buffer: "
+ << GetInErrorMessage(result);
+ return false;
+ }
+ result = midiInAddBuffer(
+ midi_handle_, midi_header_.get(), sizeof(*midi_header_));
+ if (result != MMSYSERR_NOERROR) {
+ DLOG(ERROR) << "Failed to attach input buffer: "
+ << GetInErrorMessage(result);
+ return false;
+ }
+ result = midiInStart(midi_handle_);
+ if (result != MMSYSERR_NOERROR) {
+ DLOG(ERROR) << "Failed to start input port: "
+ << GetInErrorMessage(result);
+ return false;
+ }
+ started_ = true;
+ start_time_offset_ = base::TimeTicks::Now() - base::TimeTicks();
+ return true;
+ }
+
+ void Uninitialize() {
+ MMRESULT result = MMSYSERR_NOERROR;
+ if (midi_handle_ && started_) {
+ result = midiInStop(midi_handle_);
+ DLOG_IF(ERROR, result != MMSYSERR_NOERROR)
+ << "Failed to stop input port: " << GetInErrorMessage(result);
+ started_ = false;
+ start_time_offset_ = base::TimeDelta();
+ }
+ if (midi_handle_) {
+ // midiInReset flushes pending messages. We ignore these messages.
+ device_to_be_closed_ = true;
+ result = midiInReset(midi_handle_);
+ DLOG_IF(ERROR, result != MMSYSERR_NOERROR)
+ << "Failed to reset input port: " << GetInErrorMessage(result);
+ result = midiInClose(midi_handle_);
+ device_to_be_closed_ = false;
+ DLOG_IF(ERROR, result != MMSYSERR_NOERROR)
+ << "Failed to close input port: " << GetInErrorMessage(result);
+ midi_header_.reset();
+ midi_handle_ = NULL;
+ port_index_ = kInvalidPortIndex;
+ }
+ }
+
+ static void CALLBACK HandleMessage(HMIDIIN midi_in_handle,
+ UINT message,
+ DWORD_PTR instance,
+ DWORD_PTR param1,
+ DWORD_PTR param2) {
+ // This method can be called back on any thread depending on Windows
+ // multimedia subsystem and underlying MIDI drivers.
+ InDeviceInfo* self = reinterpret_cast<InDeviceInfo*>(instance);
+ if (!self)
+ return;
+ if (self->midi_handle() != midi_in_handle)
+ return;
+
+ switch (message) {
+ case MIM_DATA:
+ self->OnShortMessageReceived(static_cast<uint8>(param1 & 0xff),
+ static_cast<uint8>((param1 >> 8) & 0xff),
+ static_cast<uint8>((param1 >> 16) & 0xff),
+ self->TickToTimeDelta(param2));
+ return;
+ case MIM_LONGDATA:
+ self->OnLongMessageReceived(reinterpret_cast<MIDIHDR*>(param1),
+ self->TickToTimeDelta(param2));
+ return;
+ case MIM_CLOSE:
+ // TODO(yukawa): Implement crbug.com/279097.
+ return;
+ }
+ }
+
+ void OnShortMessageReceived(uint8 status_byte,
+ uint8 first_data_byte,
+ uint8 second_data_byte,
+ base::TimeDelta timestamp) {
+ if (device_to_be_closed())
+ return;
+ const size_t len = GetMIDIMessageLength(status_byte);
+ if (len == 0 || port_index() == kInvalidPortIndex)
+ return;
+ const uint8 kData[] = { status_byte, first_data_byte, second_data_byte };
+ DCHECK_LE(len, arraysize(kData));
+ manager_->ReceiveMIDIData(port_index(), kData, len, timestamp.InSecondsF());
+ }
+
+ void OnLongMessageReceived(MIDIHDR* header, base::TimeDelta timestamp) {
+ if (header != midi_header_.get())
+ return;
+ MMRESULT result = MMSYSERR_NOERROR;
+ if (device_to_be_closed()) {
+ if (midi_header_ &&
+ (midi_header_->dwFlags & MHDR_PREPARED) == MHDR_PREPARED) {
+ result = midiInUnprepareHeader(
+ midi_handle_, midi_header_.get(), sizeof(*midi_header_));
+ DLOG_IF(ERROR, result != MMSYSERR_NOERROR)
+ << "Failed to uninitialize input buffer: "
+ << GetInErrorMessage(result);
+ }
+ return;
+ }
+ if (header->dwBytesRecorded > 0 && port_index() != kInvalidPortIndex) {
+ manager_->ReceiveMIDIData(port_index_,
+ reinterpret_cast<const uint8*>(header->lpData),
+ header->dwBytesRecorded,
+ timestamp.InSecondsF());
+ }
+ result = midiInAddBuffer(midi_handle(), header, sizeof(*header));
+ DLOG_IF(ERROR, result != MMSYSERR_NOERROR)
+ << "Failed to attach input port: " << GetInErrorMessage(result);
+ }
+
+ base::TimeDelta TickToTimeDelta(DWORD tick) const {
+ const base::TimeDelta delta =
+ base::TimeDelta::FromMicroseconds(static_cast<uint32>(tick));
+ return start_time_offset_ + delta;
+ }
+
+ MIDIManagerWin* manager_;
+ int port_index_;
+ HMIDIIN midi_handle_;
+ ScopedMIDIHDR midi_header_;
+ base::TimeDelta start_time_offset_;
+ bool started_;
+ bool device_to_be_closed_;
+ DISALLOW_COPY_AND_ASSIGN(MIDIManagerWin::InDeviceInfo);
+};
+
+class MIDIManagerWin::OutDeviceInfo {
+ public:
+ ~OutDeviceInfo() {
+ Uninitialize();
+ }
+
+ static scoped_ptr<OutDeviceInfo> Create(UINT device_id) {
+ scoped_ptr<OutDeviceInfo> obj(new OutDeviceInfo);
+ if (!obj->Initialize(device_id))
+ obj.reset();
+ return obj.Pass();
+ }
+
+ HMIDIOUT midi_handle() const {
+ return midi_handle_;
+ }
+
+ void Quit() {
+ quitting_ = true;
+ }
+
+ void Send(const std::vector<uint8>& data) {
+ // Check if the attached device is still available or not.
+ if (!midi_handle_)
+ return;
+
+ // Give up sending MIDI messages here if the device is already closed.
+ // Note that this check is optional. Regardless of that we check |closed_|
+ // or not, nothing harmful happens as long as |midi_handle_| is still valid.
+ if (closed_)
+ return;
+
+ // MIDI Running status must be filtered out.
+ MIDIMessageQueue message_queue(false);
+ message_queue.Add(data);
+ std::vector<uint8> message;
+ while (!quitting_) {
+ message_queue.Get(&message);
+ if (message.empty())
+ break;
+ // SendShortMIDIMessageInternal can send a MIDI message up to 3 bytes.
+ if (message.size() <= 3)
+ SendShortMIDIMessageInternal(midi_handle_, message);
+ else
+ SendLongMIDIMessageInternal(midi_handle_, message);
+ }
+ }
+
+ private:
+ OutDeviceInfo()
+ : midi_handle_(NULL),
+ closed_(false),
+ quitting_(false) {}
+
+ bool Initialize(DWORD device_id) {
+ Uninitialize();
+ // Here we use |CALLBACK_FUNCTION| to subscribe MOM_DONE and MOM_CLOSE
+ // events.
+ // - MOM_DONE: SendLongMIDIMessageInternal() relies on this event to clean
+ // up the backing store where a long MIDI message is stored.
+ // - MOM_CLOSE: This event is sent when 1) midiOutClose() is called, or 2)
+ // the MIDI device becomes unavailable for some reasons, e.g., the cable
+ // is disconnected. As for the former case, HMIDIOUT will be invalidated
+  //    soon after the callback is finished. As for the latter case, however,
+ // HMIDIOUT continues to be valid until midiOutClose() is called.
+ MMRESULT result = midiOutOpen(&midi_handle_,
+ device_id,
+ reinterpret_cast<DWORD_PTR>(&HandleMessage),
+ reinterpret_cast<DWORD_PTR>(this),
+ CALLBACK_FUNCTION);
+ if (result != MMSYSERR_NOERROR) {
+ DLOG(ERROR) << "Failed to open output device. "
+ << " id: " << device_id
+ << " message: "<< GetOutErrorMessage(result);
+ midi_handle_ = NULL;
+ return false;
+ }
+ return true;
+ }
+
+ void Uninitialize() {
+ if (!midi_handle_)
+ return;
+
+ MMRESULT result = midiOutReset(midi_handle_);
+ DLOG_IF(ERROR, result != MMSYSERR_NOERROR)
+ << "Failed to reset output port: " << GetOutErrorMessage(result);
+ result = midiOutClose(midi_handle_);
+ DLOG_IF(ERROR, result != MMSYSERR_NOERROR)
+ << "Failed to close output port: " << GetOutErrorMessage(result);
+ midi_handle_ = NULL;
+ closed_ = true;
+ }
+
+ static void CALLBACK HandleMessage(HMIDIOUT midi_out_handle,
+ UINT message,
+ DWORD_PTR instance,
+ DWORD_PTR param1,
+ DWORD_PTR param2) {
+ // This method can be called back on any thread depending on Windows
+ // multimedia subsystem and underlying MIDI drivers.
+
+ OutDeviceInfo* self = reinterpret_cast<OutDeviceInfo*>(instance);
+ if (!self)
+ return;
+ if (self->midi_handle() != midi_out_handle)
+ return;
+ switch (message) {
+ case MOM_DONE: {
+ // Take ownership of the MIDIHDR object.
+ ScopedMIDIHDR header(reinterpret_cast<MIDIHDR*>(param1));
+ if (!header)
+ return;
+ MMRESULT result = midiOutUnprepareHeader(
+ self->midi_handle(), header.get(), sizeof(*header));
+ DLOG_IF(ERROR, result != MMSYSERR_NOERROR)
+ << "Failed to uninitialize output buffer: "
+ << GetOutErrorMessage(result);
+ return;
+ }
+ case MOM_CLOSE:
+ // No lock is required since this flag is just a hint to avoid
+ // unnecessary API calls that will result in failure anyway.
+ self->closed_ = true;
+ // TODO(yukawa): Implement crbug.com/279097.
+ return;
+ }
+ }
+
+ HMIDIOUT midi_handle_;
+
+ // True if the device is already closed.
+ volatile bool closed_;
+
+ // True if the MIDIManagerWin is trying to stop the sender thread.
+ volatile bool quitting_;
+
+ DISALLOW_COPY_AND_ASSIGN(MIDIManagerWin::OutDeviceInfo);
+};
+
+MIDIManagerWin::MIDIManagerWin()
+ : send_thread_("MIDISendThread") {
+}
+
+bool MIDIManagerWin::Initialize() {
+ const UINT num_in_devices = midiInGetNumDevs();
+ in_devices_.reserve(num_in_devices);
+ for (UINT device_id = 0; device_id < num_in_devices; ++device_id) {
+ MIDIINCAPS caps = {};
+ MMRESULT result = midiInGetDevCaps(device_id, &caps, sizeof(caps));
+ if (result != MMSYSERR_NOERROR) {
+ DLOG(ERROR) << "Failed to obtain input device info: "
+ << GetInErrorMessage(result);
+ continue;
+ }
+ scoped_ptr<InDeviceInfo> in_device(InDeviceInfo::Create(this, device_id));
+ if (!in_device)
+ continue;
+ MIDIPortInfo info(
+ base::IntToString(static_cast<int>(device_id)),
+ "",
+ base::WideToUTF8(caps.szPname),
+ base::IntToString(static_cast<int>(caps.vDriverVersion)));
+ AddInputPort(info);
+ in_device->set_port_index(input_ports_.size() - 1);
+ in_devices_.push_back(in_device.Pass());
+ }
+
+ const UINT num_out_devices = midiOutGetNumDevs();
+ out_devices_.reserve(num_out_devices);
+ for (UINT device_id = 0; device_id < num_out_devices; ++device_id) {
+ MIDIOUTCAPS caps = {};
+ MMRESULT result = midiOutGetDevCaps(device_id, &caps, sizeof(caps));
+ if (result != MMSYSERR_NOERROR) {
+ DLOG(ERROR) << "Failed to obtain output device info: "
+ << GetOutErrorMessage(result);
+ continue;
+ }
+ scoped_ptr<OutDeviceInfo> out_port(OutDeviceInfo::Create(device_id));
+ if (!out_port)
+ continue;
+ MIDIPortInfo info(
+ base::IntToString(static_cast<int>(device_id)),
+ "",
+ base::WideToUTF8(caps.szPname),
+ base::IntToString(static_cast<int>(caps.vDriverVersion)));
+ AddOutputPort(info);
+ out_devices_.push_back(out_port.Pass());
+ }
+
+ return true;
+}
+
+MIDIManagerWin::~MIDIManagerWin() {
+ // Cleanup order is important. |send_thread_| must be stopped before
+ // |out_devices_| is cleared.
+ for (size_t i = 0; i < output_ports_.size(); ++i)
+ out_devices_[i]->Quit();
+ send_thread_.Stop();
+
+ out_devices_.clear();
+ output_ports_.clear();
+ in_devices_.clear();
+ input_ports_.clear();
+}
+
+void MIDIManagerWin::DispatchSendMIDIData(MIDIManagerClient* client,
+ uint32 port_index,
+ const std::vector<uint8>& data,
+ double timestamp) {
+ if (out_devices_.size() <= port_index)
+ return;
+
+ base::TimeDelta delay;
+ if (timestamp != 0.0) {
+ base::TimeTicks time_to_send =
+ base::TimeTicks() + base::TimeDelta::FromMicroseconds(
+ timestamp * base::Time::kMicrosecondsPerSecond);
+ delay = std::max(time_to_send - base::TimeTicks::Now(), base::TimeDelta());
+ }
+
+ if (!send_thread_.IsRunning())
+ send_thread_.Start();
+
+ OutDeviceInfo* out_port = out_devices_[port_index].get();
+ send_thread_.message_loop()->PostDelayedTask(
+ FROM_HERE,
+ base::Bind(&OutDeviceInfo::Send, base::Unretained(out_port), data),
+ delay);
+
+ // Call back AccumulateMIDIBytesSent() on |send_thread_| to emulate the
+ // behavior of MIDIManagerMac::SendMIDIData.
+ // TODO(yukawa): Do this task in a platform-independent way if possible.
+ // See crbug.com/325810.
+ send_thread_.message_loop()->PostTask(
+ FROM_HERE,
+ base::Bind(&MIDIManagerClient::AccumulateMIDIBytesSent,
+ base::Unretained(client), data.size()));
+}
+
+MIDIManager* MIDIManager::Create() {
+ return new MIDIManagerWin();
+}
+
+} // namespace media
diff --git a/chromium/media/midi/midi_manager_win.h b/chromium/media/midi/midi_manager_win.h
new file mode 100644
index 00000000000..eef8b4e5680
--- /dev/null
+++ b/chromium/media/midi/midi_manager_win.h
@@ -0,0 +1,40 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_MIDI_MIDI_MANAGER_WIN_H_
+#define MEDIA_MIDI_MIDI_MANAGER_WIN_H_
+
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/threading/thread.h"
+#include "media/midi/midi_manager.h"
+
+namespace media {
+
+class MIDIManagerWin : public MIDIManager {
+ public:
+ MIDIManagerWin();
+ virtual ~MIDIManagerWin();
+
+ // MIDIManager implementation.
+ virtual bool Initialize() OVERRIDE;
+ virtual void DispatchSendMIDIData(MIDIManagerClient* client,
+ uint32 port_index,
+ const std::vector<uint8>& data,
+ double timestamp) OVERRIDE;
+
+ private:
+ class InDeviceInfo;
+ class OutDeviceInfo;
+ std::vector<scoped_ptr<InDeviceInfo> > in_devices_;
+ std::vector<scoped_ptr<OutDeviceInfo> > out_devices_;
+ base::Thread send_thread_;
+ DISALLOW_COPY_AND_ASSIGN(MIDIManagerWin);
+};
+
+} // namespace media
+
+#endif // MEDIA_MIDI_MIDI_MANAGER_WIN_H_
diff --git a/chromium/media/midi/midi_message_queue.cc b/chromium/media/midi/midi_message_queue.cc
new file mode 100644
index 00000000000..3452e80be9d
--- /dev/null
+++ b/chromium/media/midi/midi_message_queue.cc
@@ -0,0 +1,119 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/midi/midi_message_queue.h"
+
+#include <algorithm>
+
+#include "base/logging.h"
+#include "media/midi/midi_message_util.h"
+
+namespace media {
+namespace {
+
+const uint8 kSysEx = 0xf0;
+const uint8 kEndOfSysEx = 0xf7;
+
+bool IsDataByte(uint8 data) {
+ return (data & 0x80) == 0;
+}
+
+bool IsFirstStatusByte(uint8 data) {
+ return !IsDataByte(data) && data != kEndOfSysEx;
+}
+
+bool IsSystemRealTimeMessage(uint8 data) {
+ return 0xf8 <= data && data <= 0xff;
+}
+
+} // namespace
+
+MIDIMessageQueue::MIDIMessageQueue(bool allow_running_status)
+ : allow_running_status_(allow_running_status) {}
+
+MIDIMessageQueue::~MIDIMessageQueue() {}
+
+void MIDIMessageQueue::Add(const std::vector<uint8>& data) {
+ queue_.insert(queue_.end(), data.begin(), data.end());
+}
+
+void MIDIMessageQueue::Add(const uint8* data, size_t length) {
+ queue_.insert(queue_.end(), data, data + length);
+}
+
+void MIDIMessageQueue::Get(std::vector<uint8>* message) {
+ message->clear();
+
+ while (true) {
+ if (queue_.empty())
+ return;
+
+ const uint8 next = queue_.front();
+ queue_.pop_front();
+
+ // "System Real Time Messages" is a special kind of MIDI messages, which can
+ // appear at arbitrary byte position of MIDI stream. Here we reorder
+ // "System Real Time Messages" prior to |next_message_| so that each message
+ // can be clearly separated as a complete MIDI message.
+ if (IsSystemRealTimeMessage(next)) {
+ message->push_back(next);
+ return;
+ }
+
+ // Here |next_message_[0]| may contain the previous status byte when
+ // |allow_running_status_| is true. Following condition fixes up
+ // |next_message_| if running status condition is not fulfilled.
+ if (!next_message_.empty() &&
+ ((next_message_[0] == kSysEx && IsFirstStatusByte(next)) ||
+ (next_message_[0] != kSysEx && !IsDataByte(next)))) {
+ // An invalid data sequence is found or running status condition is not
+ // fulfilled.
+ next_message_.clear();
+ }
+
+ if (next_message_.empty()) {
+ if (IsFirstStatusByte(next)) {
+ next_message_.push_back(next);
+ } else {
+ // MIDI protocol doesn't provide any error correction mechanism in
+ // physical layers, and incoming messages can be corrupted, and should
+ // be corrected here.
+ }
+ continue;
+ }
+
+ // Here we can assume |next_message_| starts with a valid status byte.
+ const uint8 status_byte = next_message_[0];
+ next_message_.push_back(next);
+
+ if (status_byte == kSysEx) {
+ if (next == kEndOfSysEx) {
+ std::swap(*message, next_message_);
+ next_message_.clear();
+ return;
+ }
+ continue;
+ }
+
+ DCHECK(IsDataByte(next));
+ DCHECK_NE(kSysEx, status_byte);
+ const size_t target_len = GetMIDIMessageLength(status_byte);
+ if (next_message_.size() < target_len)
+ continue;
+ if (next_message_.size() == target_len) {
+ std::swap(*message, next_message_);
+ next_message_.clear();
+ if (allow_running_status_) {
+ // Speculatively keep the status byte in case of running status. If this
+ // assumption is not true, |next_message_| will be cleared anyway.
+ next_message_.push_back(status_byte);
+ }
+ return;
+ }
+
+ NOTREACHED();
+ }
+}
+
+} // namespace media
diff --git a/chromium/media/midi/midi_message_queue.h b/chromium/media/midi/midi_message_queue.h
new file mode 100644
index 00000000000..06f0f4787fb
--- /dev/null
+++ b/chromium/media/midi/midi_message_queue.h
@@ -0,0 +1,72 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_MIDI_MIDI_MESSAGE_QUEUE_H_
+#define MEDIA_MIDI_MIDI_MESSAGE_QUEUE_H_
+
+#include <deque>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+// A simple message splitter for possibly unsafe/corrupted MIDI data stream.
+// This class allows you to:
+// - maintain fragmented MIDI message.
+// - skip any invalid data sequence.
+// - reorder MIDI messages so that "System Real Time Message", which can be
+// inserted at any point of the byte stream, is placed at the boundary of
+// complete MIDI messages.
+// - (Optional) reconstruct complete MIDI messages from data stream where
+// MIDI status byte is abbreviated (a.k.a. "running status").
+//
+// Example (pseudo message loop):
+// MIDIMessageQueue queue(true); // true to support "running status"
+// while (true) {
+// if (is_incoming_midi_data_available()) {
+// std::vector<uint8> incoming_data;
+// read_incoming_midi_data(&incoming_data)
+// queue.Add(incoming_data);
+// }
+// while (true) {
+// std::vector<uint8> next_message;
+// queue.Get(&next_message);
+// if (!next_message.empty())
+// dispatch(next_message);
+// }
+// }
+class MEDIA_EXPORT MIDIMessageQueue {
+ public:
+ // Initializes the queue. Set true to |allow_running_status| to enable
+ // "MIDI running status" reconstruction.
+ explicit MIDIMessageQueue(bool allow_running_status);
+ ~MIDIMessageQueue();
+
+ // Enqueues |data| to the internal buffer.
+ void Add(const std::vector<uint8>& data);
+ void Add(const uint8* data, size_t length);
+
+ // Fills the next complete MIDI message into |message|. If |message| is
+ // not empty, the data sequence falls into one of the following types of
+ // MIDI message.
+ // - Single "Channel Voice Message" (w/o "System Real Time Messages")
+ // - Single "Channel Mode Message" (w/o "System Real Time Messages")
+ // - Single "System Exclusive Message" (w/o "System Real Time Messages")
+ // - Single "System Common Message" (w/o "System Real Time Messages")
+ // - Single "System Real Time message"
+ // |message| is empty if there is no complete MIDI message any more.
+ void Get(std::vector<uint8>* message);
+
+ private:
+ std::deque<uint8> queue_;
+ std::vector<uint8> next_message_;
+ const bool allow_running_status_;
+ DISALLOW_COPY_AND_ASSIGN(MIDIMessageQueue);
+};
+
+} // namespace media
+
+#endif // MEDIA_MIDI_MIDI_MESSAGE_QUEUE_H_
diff --git a/chromium/media/midi/midi_message_queue_unittest.cc b/chromium/media/midi/midi_message_queue_unittest.cc
new file mode 100644
index 00000000000..a00eea6b9e4
--- /dev/null
+++ b/chromium/media/midi/midi_message_queue_unittest.cc
@@ -0,0 +1,173 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/midi/midi_message_queue.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+namespace {
+
+const uint8 kGMOn[] = { 0xf0, 0x7e, 0x7f, 0x09, 0x01, 0xf7 };
+const uint8 kGSOn[] = {
+ 0xf0, 0x41, 0x10, 0x42, 0x12, 0x40, 0x00, 0x7f, 0x00, 0x41, 0xf7,
+};
+const uint8 kNoteOn[] = { 0x90, 0x3c, 0x7f };
+const uint8 kNoteOnWithRunningStatus[] = {
+ 0x90, 0x3c, 0x7f, 0x3c, 0x7f, 0x3c, 0x7f,
+};
+const uint8 kChannelPressure[] = { 0xd0, 0x01 };
+const uint8 kChannelPressureWithRunningStatus[] = {
+ 0xd0, 0x01, 0x01, 0x01,
+};
+const uint8 kTimingClock[] = { 0xf8 };
+const uint8 kBrokenData1[] = { 0x90 };
+const uint8 kBrokenData2[] = { 0xf7 };
+const uint8 kBrokenData3[] = { 0xf2, 0x00 };
+const uint8 kDataByte0[] = { 0x00 };
+
+template <typename T, size_t N>
+void Add(MIDIMessageQueue* queue, const T(&array)[N]) {
+ queue->Add(array, N);
+}
+
+template <typename T, size_t N>
+::testing::AssertionResult ExpectEqualSequence(
+ const char* expr1, const char* expr2,
+ const T(&expected)[N], const std::vector<T>& actual) {
+ if (actual.size() != N) {
+ return ::testing::AssertionFailure()
+ << "expected: " << ::testing::PrintToString(expected)
+ << ", actual: " << ::testing::PrintToString(actual);
+ }
+ for (size_t i = 0; i < N; ++i) {
+ if (expected[i] != actual[i]) {
+ return ::testing::AssertionFailure()
+ << "expected: " << ::testing::PrintToString(expected)
+ << ", actual: " << ::testing::PrintToString(actual);
+ }
+ }
+ return ::testing::AssertionSuccess();
+}
+
+#define EXPECT_MESSAGE(expected, actual) \
+ EXPECT_PRED_FORMAT2(ExpectEqualSequence, expected, actual)
+
+TEST(MIDIMessageQueueTest, EmptyData) {
+ MIDIMessageQueue queue(false);
+ std::vector<uint8> message;
+ queue.Get(&message);
+ EXPECT_TRUE(message.empty());
+}
+
+TEST(MIDIMessageQueueTest, RunningStatusDisabled) {
+ MIDIMessageQueue queue(false);
+ Add(&queue, kGMOn);
+ Add(&queue, kBrokenData1);
+ Add(&queue, kNoteOnWithRunningStatus);
+ Add(&queue, kBrokenData2);
+ Add(&queue, kChannelPressureWithRunningStatus);
+ Add(&queue, kBrokenData3);
+ Add(&queue, kNoteOn);
+ Add(&queue, kBrokenData1);
+ Add(&queue, kGSOn);
+ Add(&queue, kBrokenData2);
+ Add(&queue, kTimingClock);
+ Add(&queue, kBrokenData3);
+
+ std::vector<uint8> message;
+ queue.Get(&message);
+ EXPECT_MESSAGE(kGMOn, message);
+ queue.Get(&message);
+ EXPECT_MESSAGE(kNoteOn, message) << "Running status should be ignored";
+ queue.Get(&message);
+ EXPECT_MESSAGE(kChannelPressure, message)
+ << "Running status should be ignored";
+ queue.Get(&message);
+ EXPECT_MESSAGE(kNoteOn, message);
+ queue.Get(&message);
+ EXPECT_MESSAGE(kGSOn, message);
+ queue.Get(&message);
+ EXPECT_MESSAGE(kTimingClock, message);
+ queue.Get(&message);
+ EXPECT_TRUE(message.empty());
+}
+
+TEST(MIDIMessageQueueTest, RunningStatusEnabled) {
+ MIDIMessageQueue queue(true);
+ Add(&queue, kGMOn);
+ Add(&queue, kBrokenData1);
+ Add(&queue, kNoteOnWithRunningStatus);
+ Add(&queue, kBrokenData2);
+ Add(&queue, kChannelPressureWithRunningStatus);
+ Add(&queue, kBrokenData3);
+ Add(&queue, kNoteOn);
+ Add(&queue, kBrokenData1);
+ Add(&queue, kGSOn);
+ Add(&queue, kBrokenData2);
+ Add(&queue, kTimingClock);
+ Add(&queue, kDataByte0);
+
+ std::vector<uint8> message;
+ queue.Get(&message);
+ EXPECT_MESSAGE(kGMOn, message);
+ queue.Get(&message);
+ EXPECT_MESSAGE(kNoteOn, message);
+ queue.Get(&message);
+ EXPECT_MESSAGE(kNoteOn, message)
+ << "Running status should be converted into a canonical MIDI message";
+ queue.Get(&message);
+ EXPECT_MESSAGE(kNoteOn, message)
+ << "Running status should be converted into a canonical MIDI message";
+ queue.Get(&message);
+ EXPECT_MESSAGE(kChannelPressure, message);
+ queue.Get(&message);
+ EXPECT_MESSAGE(kChannelPressure, message)
+ << "Running status should be converted into a canonical MIDI message";
+ queue.Get(&message);
+ EXPECT_MESSAGE(kChannelPressure, message)
+ << "Running status should be converted into a canonical MIDI message";
+ queue.Get(&message);
+ EXPECT_MESSAGE(kNoteOn, message);
+ queue.Get(&message);
+ EXPECT_MESSAGE(kGSOn, message);
+ queue.Get(&message);
+ EXPECT_MESSAGE(kTimingClock, message);
+ queue.Get(&message);
+ EXPECT_TRUE(message.empty())
+ << "Running status must not be applied to real time messages";
+}
+
+TEST(MIDIMessageQueueTest, RunningStatusEnabledWithRealTimeEvent) {
+ MIDIMessageQueue queue(true);
+ const uint8 kNoteOnWithRunningStatusWithkTimingClock[] = {
+ 0x90, 0xf8, 0x3c, 0xf8, 0x7f, 0xf8, 0x3c, 0xf8, 0x7f, 0xf8, 0x3c, 0xf8,
+ 0x7f,
+ };
+ Add(&queue, kNoteOnWithRunningStatusWithkTimingClock);
+ std::vector<uint8> message;
+ queue.Get(&message);
+ EXPECT_MESSAGE(kTimingClock, message);
+ queue.Get(&message);
+ EXPECT_MESSAGE(kTimingClock, message);
+ queue.Get(&message);
+ EXPECT_MESSAGE(kNoteOn, message);
+ queue.Get(&message);
+ EXPECT_MESSAGE(kTimingClock, message);
+ queue.Get(&message);
+ EXPECT_MESSAGE(kTimingClock, message);
+ queue.Get(&message);
+ EXPECT_MESSAGE(kNoteOn, message);
+ queue.Get(&message);
+ EXPECT_MESSAGE(kTimingClock, message);
+ queue.Get(&message);
+ EXPECT_MESSAGE(kTimingClock, message);
+ queue.Get(&message);
+ EXPECT_MESSAGE(kNoteOn, message);
+ queue.Get(&message);
+ EXPECT_TRUE(message.empty());
+}
+
+} // namespace
+} // namespace media
diff --git a/chromium/media/midi/midi_message_util.cc b/chromium/media/midi/midi_message_util.cc
new file mode 100644
index 00000000000..83d3cc071d9
--- /dev/null
+++ b/chromium/media/midi/midi_message_util.cc
@@ -0,0 +1,34 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/midi/midi_message_util.h"
+
+namespace media {
+
+size_t GetMIDIMessageLength(uint8 status_byte) {
+ if (status_byte < 0x80)
+ return 0;
+ if (0x80 <= status_byte && status_byte <= 0xbf)
+ return 3;
+ if (0xc0 <= status_byte && status_byte <= 0xdf)
+ return 2;
+ if (0xe0 <= status_byte && status_byte <= 0xef)
+ return 3;
+ if (status_byte == 0xf0)
+ return 0;
+ if (status_byte == 0xf1)
+ return 2;
+ if (status_byte == 0xf2)
+ return 3;
+ if (status_byte == 0xf3)
+ return 2;
+ if (0xf4 <= status_byte && status_byte <= 0xf6)
+ return 1;
+ if (status_byte == 0xf7)
+ return 0;
+ // 0xf8 <= status_byte && status_byte <= 0xff
+ return 1;
+}
+
+} // namespace media
diff --git a/chromium/media/midi/midi_message_util.h b/chromium/media/midi/midi_message_util.h
new file mode 100644
index 00000000000..1dc6d3cba78
--- /dev/null
+++ b/chromium/media/midi/midi_message_util.h
@@ -0,0 +1,25 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_MIDI_MIDI_MESSAGE_UTIL_H_
+#define MEDIA_MIDI_MIDI_MESSAGE_UTIL_H_
+
+#include <deque>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+// Returns the length of a MIDI message in bytes. Never returns 4 or greater.
+// Returns 0 if |status_byte| is:
+// - not a valid status byte, namely data byte.
+// - the MIDI System Exclusive message.
+// - the End of System Exclusive message.
+MEDIA_EXPORT size_t GetMIDIMessageLength(uint8 status_byte);
+
+} // namespace media
+
+#endif // MEDIA_MIDI_MIDI_MESSAGE_UTIL_H_
diff --git a/chromium/media/midi/midi_message_util_unittest.cc b/chromium/media/midi/midi_message_util_unittest.cc
new file mode 100644
index 00000000000..af3679c2987
--- /dev/null
+++ b/chromium/media/midi/midi_message_util_unittest.cc
@@ -0,0 +1,34 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/midi/midi_message_util.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+namespace {
+
+const uint8 kGMOn[] = { 0xf0, 0x7e, 0x7f, 0x09, 0x01, 0xf7 };
+const uint8 kNoteOn[] = { 0x90, 0x3c, 0x7f };
+const uint8 kChannelPressure[] = { 0xd0, 0x01 };
+const uint8 kTimingClock[] = { 0xf8 };
+
+TEST(GetMIDIMessageLengthTest, BasicTest) {
+ // Check basic functionarity
+ EXPECT_EQ(arraysize(kNoteOn), GetMIDIMessageLength(kNoteOn[0]));
+ EXPECT_EQ(arraysize(kChannelPressure),
+ GetMIDIMessageLength(kChannelPressure[0]));
+ EXPECT_EQ(arraysize(kTimingClock), GetMIDIMessageLength(kTimingClock[0]));
+
+ // SysEx message should be mapped to 0-length
+ EXPECT_EQ(0u, GetMIDIMessageLength(kGMOn[0]));
+
+ // Any data byte should be mapped to 0-length
+ EXPECT_EQ(0u, GetMIDIMessageLength(kGMOn[1]));
+ EXPECT_EQ(0u, GetMIDIMessageLength(kNoteOn[1]));
+ EXPECT_EQ(0u, GetMIDIMessageLength(kChannelPressure[1]));
+}
+
+} // namespace
+} // namespace media
diff --git a/chromium/media/mp2t/es_parser_adts.cc b/chromium/media/mp2t/es_parser_adts.cc
index b7578360b69..85de023e8fa 100644
--- a/chromium/media/mp2t/es_parser_adts.cc
+++ b/chromium/media/mp2t/es_parser_adts.cc
@@ -135,9 +135,11 @@ namespace mp2t {
EsParserAdts::EsParserAdts(
const NewAudioConfigCB& new_audio_config_cb,
- const EmitBufferCB& emit_buffer_cb)
+ const EmitBufferCB& emit_buffer_cb,
+ bool sbr_in_mimetype)
: new_audio_config_cb_(new_audio_config_cb),
- emit_buffer_cb_(emit_buffer_cb) {
+ emit_buffer_cb_(emit_buffer_cb),
+ sbr_in_mimetype_(sbr_in_mimetype) {
}
EsParserAdts::~EsParserAdts() {
@@ -247,16 +249,25 @@ bool EsParserAdts::UpdateAudioConfiguration(const uint8* adts_header) {
int samples_per_second = adts_frequency_table[frequency_index];
int adts_profile = (adts_header[2] >> 6) & 0x3;
+ // The following code is written according to ISO 14496 Part 3 Table 1.11 and
+ // Table 1.22. (Table 1.11 refers to the capping to 48000, Table 1.22 refers
+ // to SBR doubling the AAC sample rate.)
+ // TODO(damienv) : Extend sample rate cap to 96kHz for Level 5 content.
+ int extended_samples_per_second = sbr_in_mimetype_
+ ? std::min(2 * samples_per_second, 48000)
+ : samples_per_second;
+
AudioDecoderConfig audio_decoder_config(
kCodecAAC,
kSampleFormatS16,
adts_channel_layout[channel_configuration],
- samples_per_second,
+ extended_samples_per_second,
NULL, 0,
false);
if (!audio_decoder_config.Matches(last_audio_decoder_config_)) {
DVLOG(1) << "Sampling frequency: " << samples_per_second;
+ DVLOG(1) << "Extended sampling frequency: " << extended_samples_per_second;
DVLOG(1) << "Channel config: " << channel_configuration;
DVLOG(1) << "Adts profile: " << adts_profile;
// Reset the timestamp helper to use a new time scale.
diff --git a/chromium/media/mp2t/es_parser_adts.h b/chromium/media/mp2t/es_parser_adts.h
index fd0fe587c07..0fc619fdfb8 100644
--- a/chromium/media/mp2t/es_parser_adts.h
+++ b/chromium/media/mp2t/es_parser_adts.h
@@ -30,7 +30,8 @@ class EsParserAdts : public EsParser {
typedef base::Callback<void(const AudioDecoderConfig&)> NewAudioConfigCB;
EsParserAdts(const NewAudioConfigCB& new_audio_config_cb,
- const EmitBufferCB& emit_buffer_cb);
+ const EmitBufferCB& emit_buffer_cb,
+ bool sbr_in_mimetype);
virtual ~EsParserAdts();
// EsParser implementation.
@@ -59,6 +60,10 @@ class EsParserAdts : public EsParser {
NewAudioConfigCB new_audio_config_cb_;
EmitBufferCB emit_buffer_cb_;
+ // True when AAC SBR extension is signalled in the mimetype
+ // (mp4a.40.5 in the codecs parameter).
+ bool sbr_in_mimetype_;
+
// Bytes of the ES stream that have not been emitted yet.
ByteQueue es_byte_queue_;
diff --git a/chromium/media/mp2t/es_parser_h264.cc b/chromium/media/mp2t/es_parser_h264.cc
index 99c28893b4a..30764c91e6d 100644
--- a/chromium/media/mp2t/es_parser_h264.cc
+++ b/chromium/media/mp2t/es_parser_h264.cc
@@ -19,11 +19,11 @@ static const int kExtendedSar = 255;
// ISO 14496 part 10
// VUI parameters: Table E-1 "Meaning of sample aspect ratio indicator"
static const int kTableSarWidth[14] = {
- 1, 1, 12, 10, 16, 40, 24, 20, 32, 80, 18, 15, 64, 160
+ 0, 1, 12, 10, 16, 40, 24, 20, 32, 80, 18, 15, 64, 160
};
static const int kTableSarHeight[14] = {
- 1, 1, 11, 11, 11, 33, 11, 11, 11, 33, 11, 11, 33, 99
+ 0, 1, 11, 11, 11, 33, 11, 11, 11, 33, 11, 11, 33, 99
};
// Remove the start code emulation prevention ( 0x000003 )
@@ -449,12 +449,8 @@ bool EsParserH264::ProcessSPS(const uint8* buf, int size) {
}
}
- if (sar_width != sar_height) {
- // TODO(damienv): Support non square pixels.
- DVLOG(1)
- << "Non square pixel not supported yet:"
- << " sar_width=" << sar_width
- << " sar_height=" << sar_height;
+ if (sar_width == 0 || sar_height == 0) {
+ DVLOG(1) << "Unspecified SAR not supported";
return false;
}
@@ -467,11 +463,12 @@ bool EsParserH264::ProcessSPS(const uint8* buf, int size) {
frame_crop_top_offset,
(coded_size.width() - frame_crop_right_offset) - frame_crop_left_offset,
(coded_size.height() - frame_crop_bottom_offset) - frame_crop_top_offset);
-
- // TODO(damienv): calculate the natural size based
- // on the possible aspect ratio coded in the VUI parameters.
- gfx::Size natural_size(visible_rect.width(),
+ if (visible_rect.width() <= 0 || visible_rect.height() <= 0)
+ return false;
+ gfx::Size natural_size((visible_rect.width() * sar_width) / sar_height,
visible_rect.height());
+ if (natural_size.width() == 0)
+ return false;
// TODO(damienv):
// Assuming the SPS is used right away by the PPS
@@ -495,6 +492,7 @@ bool EsParserH264::ProcessSPS(const uint8* buf, int size) {
DVLOG(1) << "Pic width: " << (pic_width_in_mbs_minus1 + 1) * 16;
DVLOG(1) << "Pic height: " << (pic_height_in_map_units_minus1 + 1) * 16;
DVLOG(1) << "log2_max_frame_num_minus4: " << log2_max_frame_num_minus4;
+ DVLOG(1) << "SAR: width=" << sar_width << " height=" << sar_height;
last_video_decoder_config_ = video_decoder_config;
new_video_config_cb_.Run(video_decoder_config);
}
diff --git a/chromium/media/mp2t/mp2t_stream_parser.cc b/chromium/media/mp2t/mp2t_stream_parser.cc
index 68fca5cedd2..4a22f37d576 100644
--- a/chromium/media/mp2t/mp2t_stream_parser.cc
+++ b/chromium/media/mp2t/mp2t_stream_parser.cc
@@ -10,6 +10,7 @@
#include "media/base/audio_decoder_config.h"
#include "media/base/buffers.h"
#include "media/base/stream_parser_buffer.h"
+#include "media/base/text_track_config.h"
#include "media/base/video_decoder_config.h"
#include "media/mp2t/es_parser.h"
#include "media/mp2t/es_parser_adts.h"
@@ -150,8 +151,9 @@ Mp2tStreamParser::BufferQueueWithConfig::BufferQueueWithConfig(
Mp2tStreamParser::BufferQueueWithConfig::~BufferQueueWithConfig() {
}
-Mp2tStreamParser::Mp2tStreamParser()
- : selected_audio_pid_(-1),
+Mp2tStreamParser::Mp2tStreamParser(bool sbr_in_mimetype)
+ : sbr_in_mimetype_(sbr_in_mimetype),
+ selected_audio_pid_(-1),
selected_video_pid_(-1),
is_initialized_(false),
segment_started_(false),
@@ -166,9 +168,8 @@ void Mp2tStreamParser::Init(
const InitCB& init_cb,
const NewConfigCB& config_cb,
const NewBuffersCB& new_buffers_cb,
- const NewTextBuffersCB& text_cb,
+ const NewTextBuffersCB& /* text_cb */ ,
const NeedKeyCB& need_key_cb,
- const AddTextTrackCB& add_text_track_cb,
const NewMediaSegmentCB& new_segment_cb,
const base::Closure& end_of_segment_cb,
const LogCB& log_cb) {
@@ -346,7 +347,8 @@ void Mp2tStreamParser::RegisterPes(int pmt_pid,
pes_pid),
base::Bind(&Mp2tStreamParser::OnEmitAudioBuffer,
base::Unretained(this),
- pes_pid)));
+ pes_pid),
+ sbr_in_mimetype_));
is_audio = true;
} else {
return;
@@ -391,7 +393,7 @@ void Mp2tStreamParser::UpdatePidFilter() {
selected_audio_pid_ = lowest_audio_pid->first;
}
if (lowest_video_pid != pids_.end()) {
- DVLOG(1) << "Enable video pid: " << lowest_audio_pid->first;
+ DVLOG(1) << "Enable video pid: " << lowest_video_pid->first;
lowest_video_pid->second->Enable();
selected_video_pid_ = lowest_video_pid->first;
}
@@ -476,7 +478,8 @@ bool Mp2tStreamParser::FinishInitializationIfNeeded() {
// Pass the config before invoking the initialization callback.
RCHECK(config_cb_.Run(queue_with_config.audio_config,
- queue_with_config.video_config));
+ queue_with_config.video_config,
+ TextTrackConfigMap()));
queue_with_config.is_config_sent = true;
// For Mpeg2 TS, the duration is not known.
@@ -585,7 +588,8 @@ bool Mp2tStreamParser::EmitRemainingBuffers() {
BufferQueueWithConfig& queue_with_config = buffer_queue_chain_.front();
if (!queue_with_config.is_config_sent) {
if (!config_cb_.Run(queue_with_config.audio_config,
- queue_with_config.video_config))
+ queue_with_config.video_config,
+ TextTrackConfigMap()))
return false;
queue_with_config.is_config_sent = true;
}
diff --git a/chromium/media/mp2t/mp2t_stream_parser.h b/chromium/media/mp2t/mp2t_stream_parser.h
index dcab5595ff8..11e48d19b1b 100644
--- a/chromium/media/mp2t/mp2t_stream_parser.h
+++ b/chromium/media/mp2t/mp2t_stream_parser.h
@@ -26,7 +26,7 @@ class PidState;
class MEDIA_EXPORT Mp2tStreamParser : public StreamParser {
public:
- Mp2tStreamParser();
+ explicit Mp2tStreamParser(bool sbr_in_mimetype);
virtual ~Mp2tStreamParser();
// StreamParser implementation.
@@ -35,7 +35,6 @@ class MEDIA_EXPORT Mp2tStreamParser : public StreamParser {
const NewBuffersCB& new_buffers_cb,
const NewTextBuffersCB& text_cb,
const NeedKeyCB& need_key_cb,
- const AddTextTrackCB& add_text_track_cb,
const NewMediaSegmentCB& new_segment_cb,
const base::Closure& end_of_segment_cb,
const LogCB& log_cb) OVERRIDE;
@@ -102,6 +101,10 @@ class MEDIA_EXPORT Mp2tStreamParser : public StreamParser {
base::Closure end_of_segment_cb_;
LogCB log_cb_;
+ // True when AAC SBR extension is signalled in the mimetype
+ // (mp4a.40.5 in the codecs parameter).
+ bool sbr_in_mimetype_;
+
// Bytes of the TS stream.
ByteQueue ts_byte_queue_;
diff --git a/chromium/media/mp2t/mp2t_stream_parser_unittest.cc b/chromium/media/mp2t/mp2t_stream_parser_unittest.cc
index 12a3b9519da..fab0a008102 100644
--- a/chromium/media/mp2t/mp2t_stream_parser_unittest.cc
+++ b/chromium/media/mp2t/mp2t_stream_parser_unittest.cc
@@ -14,6 +14,7 @@
#include "media/base/decoder_buffer.h"
#include "media/base/stream_parser_buffer.h"
#include "media/base/test_data_util.h"
+#include "media/base/text_track_config.h"
#include "media/base/video_decoder_config.h"
#include "media/mp2t/mp2t_stream_parser.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -28,7 +29,8 @@ class Mp2tStreamParserTest : public testing::Test {
video_frame_count_(0),
video_min_dts_(kNoTimestamp()),
video_max_dts_(kNoTimestamp()) {
- parser_.reset(new Mp2tStreamParser());
+ bool has_sbr = false;
+ parser_.reset(new Mp2tStreamParser(has_sbr));
}
protected:
@@ -60,7 +62,9 @@ class Mp2tStreamParserTest : public testing::Test {
<< ", dur=" << duration.InMilliseconds();
}
- bool OnNewConfig(const AudioDecoderConfig& ac, const VideoDecoderConfig& vc) {
+ bool OnNewConfig(const AudioDecoderConfig& ac,
+ const VideoDecoderConfig& vc,
+ const StreamParser::TextTrackConfigMap& tc) {
DVLOG(1) << "OnNewConfig: audio=" << ac.IsValidConfig()
<< ", video=" << vc.IsValidConfig();
return true;
@@ -101,23 +105,11 @@ class Mp2tStreamParserTest : public testing::Test {
return true;
}
- bool OnNewTextBuffers(TextTrack* text_track,
- const StreamParser::BufferQueue& buffers) {
- return true;
- }
-
void OnKeyNeeded(const std::string& type,
const std::vector<uint8>& init_data) {
DVLOG(1) << "OnKeyNeeded: " << init_data.size();
}
- scoped_ptr<TextTrack> OnAddTextTrack(
- TextKind kind,
- const std::string& label,
- const std::string& language) {
- return scoped_ptr<TextTrack>();
- }
-
void OnNewSegment() {
DVLOG(1) << "OnNewSegment";
}
@@ -134,12 +126,9 @@ class Mp2tStreamParserTest : public testing::Test {
base::Unretained(this)),
base::Bind(&Mp2tStreamParserTest::OnNewBuffers,
base::Unretained(this)),
- base::Bind(&Mp2tStreamParserTest::OnNewTextBuffers,
- base::Unretained(this)),
+ StreamParser::NewTextBuffersCB(),
base::Bind(&Mp2tStreamParserTest::OnKeyNeeded,
base::Unretained(this)),
- base::Bind(&Mp2tStreamParserTest::OnAddTextTrack,
- base::Unretained(this)),
base::Bind(&Mp2tStreamParserTest::OnNewSegment,
base::Unretained(this)),
base::Bind(&Mp2tStreamParserTest::OnEndOfSegment,
diff --git a/chromium/media/mp3/mp3_stream_parser.cc b/chromium/media/mp3/mp3_stream_parser.cc
index 0688d99fcc9..b20756cd228 100644
--- a/chromium/media/mp3/mp3_stream_parser.cc
+++ b/chromium/media/mp3/mp3_stream_parser.cc
@@ -10,6 +10,7 @@
#include "media/base/bit_reader.h"
#include "media/base/buffers.h"
#include "media/base/stream_parser_buffer.h"
+#include "media/base/text_track_config.h"
#include "media/base/video_decoder_config.h"
#include "net/http/http_util.h"
@@ -96,7 +97,6 @@ static const int kSampleRateMap[4][4] = {
};
// Frame header field constants.
-static const int kVersion1 = 3;
static const int kVersion2 = 2;
static const int kVersionReserved = 1;
static const int kVersion2_5 = 0;
@@ -120,7 +120,6 @@ void MP3StreamParser::Init(const InitCB& init_cb,
const NewBuffersCB& new_buffers_cb,
const NewTextBuffersCB& text_cb,
const NeedKeyCB& need_key_cb,
- const AddTextTrackCB& add_text_track_cb,
const NewMediaSegmentCB& new_segment_cb,
const base::Closure& end_of_segment_cb,
const LogCB& log_cb) {
@@ -164,7 +163,7 @@ bool MP3StreamParser::Parse(const uint8* buf, int size) {
int data_size;
queue_.Peek(&data, &data_size);
- if (size < 4)
+ if (data_size < 4)
break;
uint32 start_code = data[0] << 24 | data[1] << 16 | data[2] << 8 | data[3];
@@ -411,7 +410,7 @@ int MP3StreamParser::ParseMP3Frame(const uint8* data,
timestamp_helper_->SetBaseTimestamp(base_timestamp);
VideoDecoderConfig video_config;
- bool success = config_cb_.Run(config_, video_config);
+ bool success = config_cb_.Run(config_, video_config, TextTrackConfigMap());
if (!init_cb_.is_null())
base::ResetAndReturn(&init_cb_).Run(success, kInfiniteDuration());
diff --git a/chromium/media/mp3/mp3_stream_parser.h b/chromium/media/mp3/mp3_stream_parser.h
index 97730ae6e82..1e2e8c6f5b3 100644
--- a/chromium/media/mp3/mp3_stream_parser.h
+++ b/chromium/media/mp3/mp3_stream_parser.h
@@ -30,7 +30,6 @@ class MEDIA_EXPORT MP3StreamParser : public StreamParser {
const NewBuffersCB& new_buffers_cb,
const NewTextBuffersCB& text_cb,
const NeedKeyCB& need_key_cb,
- const AddTextTrackCB& add_text_track_cb,
const NewMediaSegmentCB& new_segment_cb,
const base::Closure& end_of_segment_cb,
const LogCB& log_cb) OVERRIDE;
diff --git a/chromium/media/mp3/mp3_stream_parser_unittest.cc b/chromium/media/mp3/mp3_stream_parser_unittest.cc
index 9d309544af6..f565093cd5b 100644
--- a/chromium/media/mp3/mp3_stream_parser_unittest.cc
+++ b/chromium/media/mp3/mp3_stream_parser_unittest.cc
@@ -7,6 +7,7 @@
#include "media/base/decoder_buffer.h"
#include "media/base/stream_parser_buffer.h"
#include "media/base/test_data_util.h"
+#include "media/base/text_track_config.h"
#include "media/base/video_decoder_config.h"
#include "media/mp3/mp3_stream_parser.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -44,7 +45,8 @@ class MP3StreamParserTest : public testing::Test {
}
bool OnNewConfig(const AudioDecoderConfig& audio_config,
- const VideoDecoderConfig& video_config) {
+ const VideoDecoderConfig& video_config,
+ const StreamParser::TextTrackConfigMap& text_config) {
DVLOG(1) << __FUNCTION__ << "(" << audio_config.IsValidConfig() << ", "
<< video_config.IsValidConfig() << ")";
EXPECT_TRUE(audio_config.IsValidConfig());
@@ -79,22 +81,11 @@ class MP3StreamParserTest : public testing::Test {
return true;
}
- bool OnNewTextBuffers(TextTrack* text_track,
- const StreamParser::BufferQueue& buffers) {
- return true;
- }
-
void OnKeyNeeded(const std::string& type,
const std::vector<uint8>& init_data) {
DVLOG(1) << __FUNCTION__ << "(" << type << ", " << init_data.size() << ")";
}
- scoped_ptr<TextTrack> OnAddTextTrack(TextKind kind,
- const std::string& label,
- const std::string& language) {
- return scoped_ptr<TextTrack>();
- }
-
void OnNewSegment() {
DVLOG(1) << __FUNCTION__;
results_stream_ << "NewSegment";
@@ -110,11 +101,8 @@ class MP3StreamParserTest : public testing::Test {
base::Bind(&MP3StreamParserTest::OnInitDone, base::Unretained(this)),
base::Bind(&MP3StreamParserTest::OnNewConfig, base::Unretained(this)),
base::Bind(&MP3StreamParserTest::OnNewBuffers, base::Unretained(this)),
- base::Bind(&MP3StreamParserTest::OnNewTextBuffers,
- base::Unretained(this)),
+ StreamParser::NewTextBuffersCB(),
base::Bind(&MP3StreamParserTest::OnKeyNeeded, base::Unretained(this)),
- base::Bind(&MP3StreamParserTest::OnAddTextTrack,
- base::Unretained(this)),
base::Bind(&MP3StreamParserTest::OnNewSegment, base::Unretained(this)),
base::Bind(&MP3StreamParserTest::OnEndOfSegment,
base::Unretained(this)),
@@ -165,6 +153,8 @@ TEST_F(MP3StreamParserTest, UnalignedAppend512) {
"NewSegment"
"{ 0K }"
"{ 26K 52K 78K 104K }"
+ "EndOfSegment"
+ "NewSegment"
"{ 130K 156K 182K }"
"{ 208K 235K 261K 287K }"
"{ 313K }"
diff --git a/chromium/media/mp4/box_definitions.cc b/chromium/media/mp4/box_definitions.cc
index e7f169323bb..74d216f300e 100644
--- a/chromium/media/mp4/box_definitions.cc
+++ b/chromium/media/mp4/box_definitions.cc
@@ -399,13 +399,18 @@ bool VideoSampleEntry::Parse(BoxReader* reader) {
}
}
- if (format == FOURCC_AVC1 ||
- (format == FOURCC_ENCV && sinf.format.format == FOURCC_AVC1)) {
+ if (IsFormatValid())
RCHECK(reader->ReadChild(&avcc));
- }
+
return true;
}
+bool VideoSampleEntry::IsFormatValid() const {
+ return format == FOURCC_AVC1 || format == FOURCC_AVC3 ||
+ (format == FOURCC_ENCV && (sinf.format.format == FOURCC_AVC1 ||
+ sinf.format.format == FOURCC_AVC3));
+}
+
ElementaryStreamDescriptor::ElementaryStreamDescriptor()
: object_type(kForbidden) {}
@@ -738,7 +743,8 @@ bool TrackFragment::Parse(BoxReader* reader) {
reader->ReadChild(&decode_time) &&
reader->MaybeReadChildren(&runs) &&
reader->MaybeReadChild(&auxiliary_offset) &&
- reader->MaybeReadChild(&auxiliary_size);
+ reader->MaybeReadChild(&auxiliary_size) &&
+ reader->MaybeReadChild(&sdtp);
}
MovieFragment::MovieFragment() {}
@@ -753,5 +759,38 @@ bool MovieFragment::Parse(BoxReader* reader) {
return true;
}
+IndependentAndDisposableSamples::IndependentAndDisposableSamples() {}
+IndependentAndDisposableSamples::~IndependentAndDisposableSamples() {}
+FourCC IndependentAndDisposableSamples::BoxType() const { return FOURCC_SDTP; }
+
+bool IndependentAndDisposableSamples::Parse(BoxReader* reader) {
+ RCHECK(reader->ReadFullBoxHeader());
+ RCHECK(reader->version() == 0);
+ RCHECK(reader->flags() == 0);
+
+ int sample_count = reader->size() - reader->pos();
+ sample_depends_on_.resize(sample_count);
+ for (int i = 0; i < sample_count; ++i) {
+ uint8 sample_info;
+ RCHECK(reader->Read1(&sample_info));
+ RCHECK((sample_info >> 6) == 0); // reserved.
+
+ sample_depends_on_[i] =
+ static_cast<SampleDependsOn>((sample_info >> 4) & 0x3);
+
+ RCHECK(sample_depends_on_[i] != kSampleDependsOnReserved);
+ }
+
+ return true;
+}
+
+SampleDependsOn IndependentAndDisposableSamples::sample_depends_on(
+ size_t i) const {
+ if (i >= sample_depends_on_.size())
+ return kSampleDependsOnUnknown;
+
+ return sample_depends_on_[i];
+}
+
} // namespace mp4
} // namespace media
diff --git a/chromium/media/mp4/box_definitions.h b/chromium/media/mp4/box_definitions.h
index eab8c4f4105..74999612cef 100644
--- a/chromium/media/mp4/box_definitions.h
+++ b/chromium/media/mp4/box_definitions.h
@@ -183,6 +183,8 @@ struct MEDIA_EXPORT VideoSampleEntry : Box {
// Currently expected to be present regardless of format.
AVCDecoderConfigurationRecord avcc;
+
+ bool IsFormatValid() const;
};
struct MEDIA_EXPORT ElementaryStreamDescriptor : Box {
@@ -325,6 +327,27 @@ struct MEDIA_EXPORT TrackFragmentRun : Box {
std::vector<int32> sample_composition_time_offsets;
};
+// sample_depends_on values in ISO/IEC 14496-12 Section 8.40.2.3.
+enum SampleDependsOn {
+ kSampleDependsOnUnknown = 0,
+ kSampleDependsOnOthers = 1,
+ kSampleDependsOnNoOther = 2,
+ kSampleDependsOnReserved = 3,
+};
+
+class MEDIA_EXPORT IndependentAndDisposableSamples : public Box {
+ public:
+ DECLARE_BOX_METHODS(IndependentAndDisposableSamples);
+
+ // Returns the SampleDependsOn value for the |i|'th value
+ // in the track. If no data was parsed for the |i|'th sample,
+ // then |kSampleDependsOnUnknown| is returned.
+ SampleDependsOn sample_depends_on(size_t i) const;
+
+ private:
+ std::vector<SampleDependsOn> sample_depends_on_;
+};
+
struct MEDIA_EXPORT TrackFragment : Box {
DECLARE_BOX_METHODS(TrackFragment);
@@ -333,6 +356,7 @@ struct MEDIA_EXPORT TrackFragment : Box {
TrackFragmentDecodeTime decode_time;
SampleAuxiliaryInformationOffset auxiliary_offset;
SampleAuxiliaryInformationSize auxiliary_size;
+ IndependentAndDisposableSamples sdtp;
};
struct MEDIA_EXPORT MovieFragment : Box {
diff --git a/chromium/media/mp4/fourccs.h b/chromium/media/mp4/fourccs.h
index b71d2ff3a5d..01cce2bcf1b 100644
--- a/chromium/media/mp4/fourccs.h
+++ b/chromium/media/mp4/fourccs.h
@@ -13,6 +13,7 @@ namespace mp4 {
enum FourCC {
FOURCC_NULL = 0,
FOURCC_AVC1 = 0x61766331,
+ FOURCC_AVC3 = 0x61766333,
FOURCC_AVCC = 0x61766343,
FOURCC_BLOC = 0x626C6F63,
FOURCC_CENC = 0x63656e63,
diff --git a/chromium/media/mp4/mp4_stream_parser.cc b/chromium/media/mp4/mp4_stream_parser.cc
index 26cee44d14e..db1b59b4572 100644
--- a/chromium/media/mp4/mp4_stream_parser.cc
+++ b/chromium/media/mp4/mp4_stream_parser.cc
@@ -10,6 +10,7 @@
#include "base/time/time.h"
#include "media/base/audio_decoder_config.h"
#include "media/base/stream_parser_buffer.h"
+#include "media/base/text_track_config.h"
#include "media/base/video_decoder_config.h"
#include "media/base/video_util.h"
#include "media/mp4/box_definitions.h"
@@ -45,7 +46,6 @@ void MP4StreamParser::Init(const InitCB& init_cb,
const NewBuffersCB& new_buffers_cb,
const NewTextBuffersCB& /* text_cb */ ,
const NeedKeyCB& need_key_cb,
- const AddTextTrackCB& /* add_text_track_cb */ ,
const NewMediaSegmentCB& new_segment_cb,
const base::Closure& end_of_segment_cb,
const LogCB& log_cb) {
@@ -268,9 +268,7 @@ bool MP4StreamParser::ParseMoov(BoxReader* reader) {
desc_idx = 0;
const VideoSampleEntry& entry = samp_descr.video_entries[desc_idx];
- if (!(entry.format == FOURCC_AVC1 ||
- (entry.format == FOURCC_ENCV &&
- entry.sinf.format.format == FOURCC_AVC1))) {
+ if (!entry.IsFormatValid()) {
MEDIA_LOG(log_cb_) << "Unsupported video format 0x"
<< std::hex << entry.format << " in stsd box.";
return false;
@@ -294,7 +292,7 @@ bool MP4StreamParser::ParseMoov(BoxReader* reader) {
}
}
- RCHECK(config_cb_.Run(audio_config, video_config));
+ RCHECK(config_cb_.Run(audio_config, video_config, TextTrackConfigMap()));
base::TimeDelta duration;
if (moov_->extends.header.fragment_duration > 0) {
diff --git a/chromium/media/mp4/mp4_stream_parser.h b/chromium/media/mp4/mp4_stream_parser.h
index 81139d52706..946513f0445 100644
--- a/chromium/media/mp4/mp4_stream_parser.h
+++ b/chromium/media/mp4/mp4_stream_parser.h
@@ -32,7 +32,6 @@ class MEDIA_EXPORT MP4StreamParser : public StreamParser {
const NewBuffersCB& new_buffers_cb,
const NewTextBuffersCB& text_cb,
const NeedKeyCB& need_key_cb,
- const AddTextTrackCB& add_text_track_cb,
const NewMediaSegmentCB& new_segment_cb,
const base::Closure& end_of_segment_cb,
const LogCB& log_cb) OVERRIDE;
diff --git a/chromium/media/mp4/mp4_stream_parser_unittest.cc b/chromium/media/mp4/mp4_stream_parser_unittest.cc
index 816a2106e39..dd394c4f17e 100644
--- a/chromium/media/mp4/mp4_stream_parser_unittest.cc
+++ b/chromium/media/mp4/mp4_stream_parser_unittest.cc
@@ -14,6 +14,7 @@
#include "media/base/decoder_buffer.h"
#include "media/base/stream_parser_buffer.h"
#include "media/base/test_data_util.h"
+#include "media/base/text_track_config.h"
#include "media/base/video_decoder_config.h"
#include "media/mp4/es_descriptor.h"
#include "media/mp4/mp4_stream_parser.h"
@@ -62,7 +63,9 @@ class MP4StreamParserTest : public testing::Test {
<< ", dur=" << duration.InMilliseconds();
}
- bool NewConfigF(const AudioDecoderConfig& ac, const VideoDecoderConfig& vc) {
+ bool NewConfigF(const AudioDecoderConfig& ac,
+ const VideoDecoderConfig& vc,
+ const StreamParser::TextTrackConfigMap& tc) {
DVLOG(1) << "NewConfigF: audio=" << ac.IsValidConfig()
<< ", video=" << vc.IsValidConfig();
configs_received_ = true;
@@ -88,11 +91,6 @@ class MP4StreamParserTest : public testing::Test {
return true;
}
- bool NewTextBuffersF(TextTrack* text_track,
- const StreamParser::BufferQueue& buffers) {
- return true;
- }
-
void KeyNeededF(const std::string& type,
const std::vector<uint8>& init_data) {
DVLOG(1) << "KeyNeededF: " << init_data.size();
@@ -100,13 +98,6 @@ class MP4StreamParserTest : public testing::Test {
EXPECT_FALSE(init_data.empty());
}
- scoped_ptr<TextTrack> AddTextTrackF(
- TextKind kind,
- const std::string& label,
- const std::string& language) {
- return scoped_ptr<TextTrack>();
- }
-
void NewSegmentF() {
DVLOG(1) << "NewSegmentF";
}
@@ -120,10 +111,8 @@ class MP4StreamParserTest : public testing::Test {
base::Bind(&MP4StreamParserTest::InitF, base::Unretained(this)),
base::Bind(&MP4StreamParserTest::NewConfigF, base::Unretained(this)),
base::Bind(&MP4StreamParserTest::NewBuffersF, base::Unretained(this)),
- base::Bind(&MP4StreamParserTest::NewTextBuffersF,
- base::Unretained(this)),
+ StreamParser::NewTextBuffersCB(),
base::Bind(&MP4StreamParserTest::KeyNeededF, base::Unretained(this)),
- base::Bind(&MP4StreamParserTest::AddTextTrackF, base::Unretained(this)),
base::Bind(&MP4StreamParserTest::NewSegmentF, base::Unretained(this)),
base::Bind(&MP4StreamParserTest::EndOfSegmentF,
base::Unretained(this)),
diff --git a/chromium/media/mp4/track_run_iterator.cc b/chromium/media/mp4/track_run_iterator.cc
index 95dab69ea4f..4dbd14f9f3f 100644
--- a/chromium/media/mp4/track_run_iterator.cc
+++ b/chromium/media/mp4/track_run_iterator.cc
@@ -76,7 +76,8 @@ static void PopulateSampleInfo(const TrackExtends& trex,
const TrackFragmentRun& trun,
const int64 edit_list_offset,
const uint32 i,
- SampleInfo* sample_info) {
+ SampleInfo* sample_info,
+ const SampleDependsOn sample_depends_on) {
if (i < trun.sample_sizes.size()) {
sample_info->size = trun.sample_sizes[i];
} else if (tfhd.default_sample_size > 0) {
@@ -108,7 +109,23 @@ static void PopulateSampleInfo(const TrackExtends& trex,
} else {
flags = trex.default_sample_flags;
}
- sample_info->is_keyframe = !(flags & kSampleIsDifferenceSampleFlagMask);
+
+ switch (sample_depends_on) {
+ case kSampleDependsOnUnknown:
+ sample_info->is_keyframe = !(flags & kSampleIsDifferenceSampleFlagMask);
+ break;
+
+ case kSampleDependsOnOthers:
+ sample_info->is_keyframe = false;
+ break;
+
+ case kSampleDependsOnNoOther:
+ sample_info->is_keyframe = true;
+ break;
+
+ case kSampleDependsOnReserved:
+ CHECK(false);
+ }
}
// In well-structured encrypted media, each track run will be immediately
@@ -249,7 +266,7 @@ bool TrackRunIterator::Init(const MovieFragment& moof) {
tri.samples.resize(trun.sample_count);
for (size_t k = 0; k < trun.sample_count; k++) {
PopulateSampleInfo(*trex, traf.header, trun, edit_list_offset,
- k, &tri.samples[k]);
+ k, &tri.samples[k], traf.sdtp.sample_depends_on(k));
run_start_dts += tri.samples[k].duration;
}
runs_.push_back(tri);
diff --git a/chromium/media/shared_memory_support.gypi b/chromium/media/shared_memory_support.gypi
index 0f1c53acb85..65403f748d8 100644
--- a/chromium/media/shared_memory_support.gypi
+++ b/chromium/media/shared_memory_support.gypi
@@ -10,8 +10,6 @@
'shared_memory_support_sources': [
'audio/audio_parameters.cc',
'audio/audio_parameters.h',
- 'audio/shared_memory_util.cc',
- 'audio/shared_memory_util.h',
'base/audio_bus.cc',
'base/audio_bus.h',
'base/channel_layout.cc',
diff --git a/chromium/media/tools/layout_tests/layouttest_analyzer_helpers.py b/chromium/media/tools/layout_tests/layouttest_analyzer_helpers.py
index 2eb3c158789..e5791bc62a6 100644
--- a/chromium/media/tools/layout_tests/layouttest_analyzer_helpers.py
+++ b/chromium/media/tools/layout_tests/layouttest_analyzer_helpers.py
@@ -150,7 +150,7 @@ class AnalyzerResultMap:
if delta <= 0:
raise ValueError('The number of tests in test group "whole" is equal or '
'less than that of "skip"')
- return 100 - len(self.result_map['nonskip'].keys()) * 100 / delta
+ return 100 - len(self.result_map['nonskip'].keys()) * 100.0 / delta
def ConvertToCSVText(self, current_time):
"""Convert |self.result_map| into stats and issues text in CSV format.
@@ -214,7 +214,7 @@ class AnalyzerResultMap:
'<li>The number of tests: %d (%s)</li>'
'<li>The number of failing skipped tests: %d (%s)</li>'
'<li>The number of failing non-skipped tests: %d (%s)</li>'
- '<li>Passing rate: %d %%</li></ul>') % (
+ '<li>Passing rate: %.2f %%</li></ul>') % (
prev_time, len(self.result_map['whole'].keys()),
AnalyzerResultMap.GetDiffString(diff_map['whole'], 'whole'),
len(self.result_map['skip'].keys()),
diff --git a/chromium/media/tools/player_x11/gl_video_renderer.cc b/chromium/media/tools/player_x11/gl_video_renderer.cc
index 9e508087a0c..3966d81afaf 100644
--- a/chromium/media/tools/player_x11/gl_video_renderer.cc
+++ b/chromium/media/tools/player_x11/gl_video_renderer.cc
@@ -67,14 +67,6 @@ static const float kVertices[8] = {
1.f, -1.f,
};
-// Texture Coordinates mapping the entire texture.
-static const float kTextureCoords[8] = {
- 0, 0,
- 0, 1,
- 1, 0,
- 1, 1,
-};
-
// Pass-through vertex shader.
static const char kVertexShader[] =
"varying vec2 interp_tc;\n"
@@ -148,7 +140,7 @@ void GlVideoRenderer::Paint(media::VideoFrame* video_frame) {
void GlVideoRenderer::Initialize(gfx::Size coded_size, gfx::Rect visible_rect) {
CHECK(!gl_context_);
- LOG(INFO) << "Initializing GL Renderer...";
+ VLOG(0) << "Initializing GL Renderer...";
// Resize the window to fit that of the video.
XResizeWindow(display_, window_, visible_rect.width(), visible_rect.height());
diff --git a/chromium/media/tools/player_x11/player_x11.cc b/chromium/media/tools/player_x11/player_x11.cc
index c154e6937e2..fe3beec5a8b 100644
--- a/chromium/media/tools/player_x11/player_x11.cc
+++ b/chromium/media/tools/player_x11/player_x11.cc
@@ -28,7 +28,7 @@
#include "media/filters/ffmpeg_demuxer.h"
#include "media/filters/ffmpeg_video_decoder.h"
#include "media/filters/file_data_source.h"
-#include "media/filters/video_renderer_base.h"
+#include "media/filters/video_renderer_impl.h"
#include "media/tools/player_x11/data_source_logger.h"
// Include X11 headers here because X11/Xlib.h #define's Status
@@ -121,7 +121,7 @@ void InitPipeline(media::Pipeline* pipeline,
ScopedVector<media::VideoDecoder> video_decoders;
video_decoders.push_back(new media::FFmpegVideoDecoder(message_loop));
- scoped_ptr<media::VideoRenderer> video_renderer(new media::VideoRendererBase(
+ scoped_ptr<media::VideoRenderer> video_renderer(new media::VideoRendererImpl(
message_loop,
video_decoders.Pass(),
media::SetDecryptorReadyCB(),
@@ -136,8 +136,7 @@ void InitPipeline(media::Pipeline* pipeline,
message_loop,
new media::NullAudioSink(message_loop),
audio_decoders.Pass(),
- media::SetDecryptorReadyCB(),
- true));
+ media::SetDecryptorReadyCB()));
collection->SetAudioRenderer(audio_renderer.Pass());
base::WaitableEvent event(true, false);
@@ -246,7 +245,8 @@ int main(int argc, char** argv) {
return 1;
}
- scoped_ptr<media::AudioManager> audio_manager(media::AudioManager::Create());
+ scoped_ptr<media::AudioManager> audio_manager(
+ media::AudioManager::CreateForTesting());
g_audio_manager = audio_manager.get();
logging::LoggingSettings settings;
diff --git a/chromium/media/tools/player_x11/x11_video_renderer.cc b/chromium/media/tools/player_x11/x11_video_renderer.cc
index c96d4663e6f..907e1abb5f6 100644
--- a/chromium/media/tools/player_x11/x11_video_renderer.cc
+++ b/chromium/media/tools/player_x11/x11_video_renderer.cc
@@ -16,7 +16,7 @@
// Creates a 32-bit XImage.
static XImage* CreateImage(Display* display, int width, int height) {
- LOG(INFO) << "Allocating XImage " << width << "x" << height;
+ VLOG(0) << "Allocating XImage " << width << "x" << height;
return XCreateImage(display,
DefaultVisual(display, DefaultScreen(display)),
DefaultDepth(display, DefaultScreen(display)),
@@ -181,7 +181,7 @@ void X11VideoRenderer::Paint(media::VideoFrame* video_frame) {
void X11VideoRenderer::Initialize(gfx::Size coded_size,
gfx::Rect visible_rect) {
CHECK(!image_);
- LOG(INFO) << "Initializing X11 Renderer...";
+ VLOG(0) << "Initializing X11 Renderer...";
// Resize the window to fit that of the video.
XResizeWindow(display_, window_, visible_rect.width(), visible_rect.height());
@@ -194,7 +194,7 @@ void X11VideoRenderer::Initialize(gfx::Size coded_size,
use_render_ = XRenderQueryExtension(display_, &dummy, &dummy);
if (use_render_) {
- LOG(INFO) << "Using XRender extension.";
+ VLOG(0) << "Using XRender extension.";
// If we are using XRender, we'll create a picture representing the
// window.
diff --git a/chromium/media/video/capture/android/video_capture_device_android.cc b/chromium/media/video/capture/android/video_capture_device_android.cc
index 141a5d0fae2..adfa9a3455c 100644
--- a/chromium/media/video/capture/android/video_capture_device_android.cc
+++ b/chromium/media/video/capture/android/video_capture_device_android.cc
@@ -53,6 +53,12 @@ void VideoCaptureDevice::GetDeviceNames(Names* device_names) {
}
}
+// static
+void VideoCaptureDevice::GetDeviceSupportedFormats(const Name& device,
+ VideoCaptureFormats* formats) {
+ NOTIMPLEMENTED();
+}
+
const std::string VideoCaptureDevice::Name::GetModel() const {
// Android cameras are not typically USB devices, and this method is currently
// only used for USB model identifiers, so this implementation just indicates
@@ -80,15 +86,10 @@ bool VideoCaptureDeviceAndroid::RegisterVideoCaptureDevice(JNIEnv* env) {
}
VideoCaptureDeviceAndroid::VideoCaptureDeviceAndroid(const Name& device_name)
- : state_(kIdle),
- got_first_frame_(false),
- observer_(NULL),
- device_name_(device_name),
- current_settings_() {
-}
+ : state_(kIdle), got_first_frame_(false), device_name_(device_name) {}
VideoCaptureDeviceAndroid::~VideoCaptureDeviceAndroid() {
- DeAllocate();
+ StopAndDeAllocate();
}
bool VideoCaptureDeviceAndroid::Init() {
@@ -100,78 +101,60 @@ bool VideoCaptureDeviceAndroid::Init() {
j_capture_.Reset(Java_VideoCapture_createVideoCapture(
env, base::android::GetApplicationContext(), id,
- reinterpret_cast<jint>(this)));
+ reinterpret_cast<intptr_t>(this)));
return true;
}
-const VideoCaptureDevice::Name& VideoCaptureDeviceAndroid::device_name() {
- return device_name_;
-}
-
-void VideoCaptureDeviceAndroid::Allocate(
- const VideoCaptureCapability& capture_format,
- EventHandler* observer) {
+void VideoCaptureDeviceAndroid::AllocateAndStart(
+ const VideoCaptureParams& params,
+ scoped_ptr<Client> client) {
+ DVLOG(1) << "VideoCaptureDeviceAndroid::AllocateAndStart";
{
base::AutoLock lock(lock_);
if (state_ != kIdle)
return;
- observer_ = observer;
- state_ = kAllocated;
+ client_ = client.Pass();
+ got_first_frame_ = false;
}
JNIEnv* env = AttachCurrentThread();
- jboolean ret = Java_VideoCapture_allocate(env,
- j_capture_.obj(),
- capture_format.width,
- capture_format.height,
- capture_format.frame_rate);
+ jboolean ret =
+ Java_VideoCapture_allocate(env,
+ j_capture_.obj(),
+ params.requested_format.frame_size.width(),
+ params.requested_format.frame_size.height(),
+ params.requested_format.frame_rate);
if (!ret) {
SetErrorState("failed to allocate");
return;
}
// Store current width and height.
- current_settings_.width =
- Java_VideoCapture_queryWidth(env, j_capture_.obj());
- current_settings_.height =
- Java_VideoCapture_queryHeight(env, j_capture_.obj());
- current_settings_.frame_rate =
+ capture_format_.frame_size.SetSize(
+ Java_VideoCapture_queryWidth(env, j_capture_.obj()),
+ Java_VideoCapture_queryHeight(env, j_capture_.obj()));
+ capture_format_.frame_rate =
Java_VideoCapture_queryFrameRate(env, j_capture_.obj());
- current_settings_.color = GetColorspace();
- DCHECK_NE(current_settings_.color, media::PIXEL_FORMAT_UNKNOWN);
- CHECK(current_settings_.width > 0 && !(current_settings_.width % 2));
- CHECK(current_settings_.height > 0 && !(current_settings_.height % 2));
+ capture_format_.pixel_format = GetColorspace();
+ DCHECK_NE(capture_format_.pixel_format, media::PIXEL_FORMAT_UNKNOWN);
+ CHECK(capture_format_.frame_size.GetArea() > 0);
+ CHECK(!(capture_format_.frame_size.width() % 2));
+ CHECK(!(capture_format_.frame_size.height() % 2));
- if (capture_format.frame_rate > 0) {
+ if (capture_format_.frame_rate > 0) {
frame_interval_ = base::TimeDelta::FromMicroseconds(
- (base::Time::kMicrosecondsPerSecond + capture_format.frame_rate - 1) /
- capture_format.frame_rate);
+ (base::Time::kMicrosecondsPerSecond + capture_format_.frame_rate - 1) /
+ capture_format_.frame_rate);
}
- DVLOG(1) << "VideoCaptureDeviceAndroid::Allocate: queried width="
- << current_settings_.width
- << ", height="
- << current_settings_.height
- << ", frame_rate="
- << current_settings_.frame_rate;
- // Report the frame size to the observer.
- observer_->OnFrameInfo(current_settings_);
-}
+ DVLOG(1) << "VideoCaptureDeviceAndroid::Allocate: queried frame_size="
+ << capture_format_.frame_size.ToString()
+ << ", frame_rate=" << capture_format_.frame_rate;
-void VideoCaptureDeviceAndroid::Start() {
- DVLOG(1) << "VideoCaptureDeviceAndroid::Start";
- {
- base::AutoLock lock(lock_);
- got_first_frame_ = false;
- DCHECK_EQ(state_, kAllocated);
- }
-
- JNIEnv* env = AttachCurrentThread();
-
- jint ret = Java_VideoCapture_startCapture(env, j_capture_.obj());
- if (ret < 0) {
+ jint result = Java_VideoCapture_startCapture(env, j_capture_.obj());
+ if (result < 0) {
SetErrorState("failed to start capture");
return;
}
@@ -182,14 +165,12 @@ void VideoCaptureDeviceAndroid::Start() {
}
}
-void VideoCaptureDeviceAndroid::Stop() {
- DVLOG(1) << "VideoCaptureDeviceAndroid::Stop";
+void VideoCaptureDeviceAndroid::StopAndDeAllocate() {
+ DVLOG(1) << "VideoCaptureDeviceAndroid::StopAndDeAllocate";
{
base::AutoLock lock(lock_);
if (state_ != kCapturing && state_ != kError)
return;
- if (state_ == kCapturing)
- state_ = kAllocated;
}
JNIEnv* env = AttachCurrentThread();
@@ -199,28 +180,13 @@ void VideoCaptureDeviceAndroid::Stop() {
SetErrorState("failed to stop capture");
return;
}
-}
-void VideoCaptureDeviceAndroid::DeAllocate() {
- DVLOG(1) << "VideoCaptureDeviceAndroid::DeAllocate";
{
base::AutoLock lock(lock_);
- if (state_ == kIdle)
- return;
-
- if (state_ == kCapturing) {
- base::AutoUnlock unlock(lock_);
- Stop();
- }
-
- if (state_ == kAllocated)
- state_ = kIdle;
-
- observer_ = NULL;
+ state_ = kIdle;
+ client_.reset();
}
- JNIEnv* env = AttachCurrentThread();
-
Java_VideoCapture_deallocate(env, j_capture_.obj());
}
@@ -229,13 +195,11 @@ void VideoCaptureDeviceAndroid::OnFrameAvailable(
jobject obj,
jbyteArray data,
jint length,
- jint rotation,
- jboolean flip_vert,
- jboolean flip_horiz) {
+ jint rotation) {
DVLOG(3) << "VideoCaptureDeviceAndroid::OnFrameAvailable: length =" << length;
base::AutoLock lock(lock_);
- if (state_ != kCapturing || !observer_)
+ if (state_ != kCapturing || !client_.get())
return;
jbyte* buffer = env->GetByteArrayElements(data, NULL);
@@ -256,9 +220,11 @@ void VideoCaptureDeviceAndroid::OnFrameAvailable(
if (expected_next_frame_time_ <= current_time) {
expected_next_frame_time_ += frame_interval_;
- observer_->OnIncomingCapturedFrame(
- reinterpret_cast<uint8*>(buffer), length, base::Time::Now(),
- rotation, flip_vert, flip_horiz);
+ client_->OnIncomingCapturedFrame(reinterpret_cast<uint8*>(buffer),
+ length,
+ base::Time::Now(),
+ rotation,
+ capture_format_);
}
env->ReleaseByteArrayElements(data, buffer, JNI_ABORT);
@@ -291,7 +257,7 @@ void VideoCaptureDeviceAndroid::SetErrorState(const std::string& reason) {
base::AutoLock lock(lock_);
state_ = kError;
}
- observer_->OnError();
+ client_->OnError();
}
} // namespace media
diff --git a/chromium/media/video/capture/android/video_capture_device_android.h b/chromium/media/video/capture/android/video_capture_device_android.h
index de6955d9e8f..635417af572 100644
--- a/chromium/media/video/capture/android/video_capture_device_android.h
+++ b/chromium/media/video/capture/android/video_capture_device_android.h
@@ -19,9 +19,9 @@ namespace media {
// VideoCaptureDevice on Android. The VideoCaptureDevice API's are called
// by VideoCaptureManager on its own thread, while OnFrameAvailable is called
-// on JAVA thread (i.e., UI thread). Both will access |state_| and |observer_|,
+// on JAVA thread (i.e., UI thread). Both will access |state_| and |client_|,
// but only VideoCaptureManager would change their value.
-class MEDIA_EXPORT VideoCaptureDeviceAndroid : public VideoCaptureDevice1 {
+class MEDIA_EXPORT VideoCaptureDeviceAndroid : public VideoCaptureDevice {
public:
virtual ~VideoCaptureDeviceAndroid();
@@ -29,12 +29,9 @@ class MEDIA_EXPORT VideoCaptureDeviceAndroid : public VideoCaptureDevice1 {
static bool RegisterVideoCaptureDevice(JNIEnv* env);
// VideoCaptureDevice implementation.
- virtual void Allocate(const VideoCaptureCapability& capture_format,
- EventHandler* observer) OVERRIDE;
- virtual void Start() OVERRIDE;
- virtual void Stop() OVERRIDE;
- virtual void DeAllocate() OVERRIDE;
- virtual const Name& device_name() OVERRIDE;
+ virtual void AllocateAndStart(const VideoCaptureParams& params,
+ scoped_ptr<Client> client) OVERRIDE;
+ virtual void StopAndDeAllocate() OVERRIDE;
// Implement org.chromium.media.VideoCapture.nativeOnFrameAvailable.
void OnFrameAvailable(
@@ -42,14 +39,11 @@ class MEDIA_EXPORT VideoCaptureDeviceAndroid : public VideoCaptureDevice1 {
jobject obj,
jbyteArray data,
jint length,
- jint rotation,
- jboolean flip_vert,
- jboolean flip_horiz);
+ jint rotation);
private:
enum InternalState {
kIdle, // The device is opened but not in use.
- kAllocated, // All resouces have been allocated and camera can be started.
kCapturing, // Video is being captured.
kError // Hit error. User needs to recover by destroying the object.
};
@@ -66,17 +60,17 @@ class MEDIA_EXPORT VideoCaptureDeviceAndroid : public VideoCaptureDevice1 {
VideoPixelFormat GetColorspace();
void SetErrorState(const std::string& reason);
- // Prevent racing on accessing |state_| and |observer_| since both could be
+ // Prevent racing on accessing |state_| and |client_| since both could be
// accessed from different threads.
base::Lock lock_;
InternalState state_;
bool got_first_frame_;
base::TimeTicks expected_next_frame_time_;
base::TimeDelta frame_interval_;
- VideoCaptureDevice::EventHandler* observer_;
+ scoped_ptr<VideoCaptureDevice::Client> client_;
Name device_name_;
- VideoCaptureCapability current_settings_;
+ VideoCaptureFormat capture_format_;
// Java VideoCaptureAndroid instance.
base::android::ScopedJavaGlobalRef<jobject> j_capture_;
diff --git a/chromium/media/video/capture/fake_video_capture_device.cc b/chromium/media/video/capture/fake_video_capture_device.cc
index 8434bc3ebbe..a87514d4347 100644
--- a/chromium/media/video/capture/fake_video_capture_device.cc
+++ b/chromium/media/video/capture/fake_video_capture_device.cc
@@ -22,157 +22,161 @@ static const int kFakeCaptureCapabilityChangePeriod = 30;
enum { kNumberOfFakeDevices = 2 };
bool FakeVideoCaptureDevice::fail_next_create_ = false;
+base::subtle::Atomic32 FakeVideoCaptureDevice::number_of_devices_ =
+ kNumberOfFakeDevices;
+// static
+size_t FakeVideoCaptureDevice::NumberOfFakeDevices(void) {
+ return number_of_devices_;
+}
+
+// static
void FakeVideoCaptureDevice::GetDeviceNames(Names* const device_names) {
// Empty the name list.
device_names->erase(device_names->begin(), device_names->end());
- for (int n = 0; n < kNumberOfFakeDevices; n++) {
+ int number_of_devices = base::subtle::NoBarrier_Load(&number_of_devices_);
+ for (int32 n = 0; n < number_of_devices; n++) {
Name name(base::StringPrintf("fake_device_%d", n),
base::StringPrintf("/dev/video%d", n));
device_names->push_back(name);
}
}
+// static
+void FakeVideoCaptureDevice::GetDeviceSupportedFormats(
+ const Name& device,
+ VideoCaptureFormats* supported_formats) {
+
+ supported_formats->clear();
+ VideoCaptureFormat capture_format_640x480;
+ capture_format_640x480.pixel_format = media::PIXEL_FORMAT_I420;
+ capture_format_640x480.frame_size.SetSize(640, 480);
+ capture_format_640x480.frame_rate = 1000 / kFakeCaptureTimeoutMs;
+ supported_formats->push_back(capture_format_640x480);
+ VideoCaptureFormat capture_format_320x240;
+ capture_format_320x240.pixel_format = media::PIXEL_FORMAT_I420;
+ capture_format_320x240.frame_size.SetSize(320, 240);
+ capture_format_320x240.frame_rate = 1000 / kFakeCaptureTimeoutMs;
+ supported_formats->push_back(capture_format_320x240);
+}
+
+// static
VideoCaptureDevice* FakeVideoCaptureDevice::Create(const Name& device_name) {
if (fail_next_create_) {
fail_next_create_ = false;
return NULL;
}
- for (int n = 0; n < kNumberOfFakeDevices; ++n) {
+ int number_of_devices = base::subtle::NoBarrier_Load(&number_of_devices_);
+ for (int32 n = 0; n < number_of_devices; ++n) {
std::string possible_id = base::StringPrintf("/dev/video%d", n);
if (device_name.id().compare(possible_id) == 0) {
- return new FakeVideoCaptureDevice(device_name);
+ return new FakeVideoCaptureDevice();
}
}
return NULL;
}
+// static
void FakeVideoCaptureDevice::SetFailNextCreate() {
fail_next_create_ = true;
}
-FakeVideoCaptureDevice::FakeVideoCaptureDevice(const Name& device_name)
- : device_name_(device_name),
- observer_(NULL),
- state_(kIdle),
- capture_thread_("CaptureThread"),
- frame_count_(0),
- capabilities_roster_index_(0) {
+// static
+void FakeVideoCaptureDevice::SetNumberOfFakeDevices(size_t number_of_devices) {
+ base::subtle::NoBarrier_AtomicExchange(&number_of_devices_,
+ number_of_devices);
}
+FakeVideoCaptureDevice::FakeVideoCaptureDevice()
+ : capture_thread_("CaptureThread"),
+ frame_count_(0),
+ format_roster_index_(0) {}
+
FakeVideoCaptureDevice::~FakeVideoCaptureDevice() {
- // Check if the thread is running.
- // This means that the device have not been DeAllocated properly.
+ DCHECK(thread_checker_.CalledOnValidThread());
DCHECK(!capture_thread_.IsRunning());
}
-void FakeVideoCaptureDevice::Allocate(
- const VideoCaptureCapability& capture_format,
- EventHandler* observer) {
- capture_format_.frame_size_type = capture_format.frame_size_type;
- if (capture_format.frame_size_type == VariableResolutionVideoCaptureDevice)
- PopulateCapabilitiesRoster();
-
- if (state_ != kIdle) {
- return; // Wrong state.
- }
-
- observer_ = observer;
- capture_format_.color = PIXEL_FORMAT_I420;
- capture_format_.expected_capture_delay = 0;
- capture_format_.interlaced = false;
- if (capture_format.width > 320) { // VGA
- capture_format_.width = 640;
- capture_format_.height = 480;
- capture_format_.frame_rate = 30;
- } else { // QVGA
- capture_format_.width = 320;
- capture_format_.height = 240;
- capture_format_.frame_rate = 30;
- }
+void FakeVideoCaptureDevice::AllocateAndStart(
+ const VideoCaptureParams& params,
+ scoped_ptr<VideoCaptureDevice::Client> client) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(!capture_thread_.IsRunning());
- const size_t fake_frame_size = VideoFrame::AllocationSize(
- VideoFrame::I420,
- gfx::Size(capture_format_.width, capture_format_.height));
- fake_frame_.reset(new uint8[fake_frame_size]);
+ capture_thread_.Start();
+ capture_thread_.message_loop()->PostTask(
+ FROM_HERE,
+ base::Bind(&FakeVideoCaptureDevice::OnAllocateAndStart,
+ base::Unretained(this),
+ params,
+ base::Passed(&client)));
+}
- state_ = kAllocated;
- observer_->OnFrameInfo(capture_format_);
+void FakeVideoCaptureDevice::StopAndDeAllocate() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(capture_thread_.IsRunning());
+ capture_thread_.message_loop()->PostTask(
+ FROM_HERE,
+ base::Bind(&FakeVideoCaptureDevice::OnStopAndDeAllocate,
+ base::Unretained(this)));
+ capture_thread_.Stop();
}
-void FakeVideoCaptureDevice::Reallocate() {
- DCHECK_EQ(state_, kCapturing);
- capture_format_ = capabilities_roster_.at(++capabilities_roster_index_ %
- capabilities_roster_.size());
- DCHECK_EQ(capture_format_.color, PIXEL_FORMAT_I420);
- DVLOG(3) << "Reallocating FakeVideoCaptureDevice, new capture resolution ("
- << capture_format_.width << "x" << capture_format_.height << ")";
-
- const size_t fake_frame_size = VideoFrame::AllocationSize(
- VideoFrame::I420,
- gfx::Size(capture_format_.width, capture_format_.height));
+void FakeVideoCaptureDevice::OnAllocateAndStart(
+ const VideoCaptureParams& params,
+ scoped_ptr<VideoCaptureDevice::Client> client) {
+ DCHECK_EQ(capture_thread_.message_loop(), base::MessageLoop::current());
+ client_ = client.Pass();
+ capture_format_.pixel_format = PIXEL_FORMAT_I420;
+ capture_format_.frame_rate = 30;
+ if (params.requested_format.frame_size.width() > 320)
+ capture_format_.frame_size.SetSize(640, 480);
+ else
+ capture_format_.frame_size.SetSize(320, 240);
+ if (params.allow_resolution_change)
+ PopulateFormatRoster();
+ const size_t fake_frame_size =
+ VideoFrame::AllocationSize(VideoFrame::I420, capture_format_.frame_size);
fake_frame_.reset(new uint8[fake_frame_size]);
- observer_->OnFrameInfoChanged(capture_format_);
-}
-
-void FakeVideoCaptureDevice::Start() {
- if (state_ != kAllocated) {
- return; // Wrong state.
- }
- state_ = kCapturing;
- capture_thread_.Start();
capture_thread_.message_loop()->PostTask(
FROM_HERE,
base::Bind(&FakeVideoCaptureDevice::OnCaptureTask,
base::Unretained(this)));
}
-void FakeVideoCaptureDevice::Stop() {
- if (state_ != kCapturing) {
- return; // Wrong state.
- }
- capture_thread_.Stop();
- state_ = kAllocated;
-}
-
-void FakeVideoCaptureDevice::DeAllocate() {
- if (state_ != kAllocated && state_ != kCapturing) {
- return; // Wrong state.
- }
- capture_thread_.Stop();
- state_ = kIdle;
-}
-
-const VideoCaptureDevice::Name& FakeVideoCaptureDevice::device_name() {
- return device_name_;
+void FakeVideoCaptureDevice::OnStopAndDeAllocate() {
+ DCHECK_EQ(capture_thread_.message_loop(), base::MessageLoop::current());
+ client_.reset();
}
void FakeVideoCaptureDevice::OnCaptureTask() {
- if (state_ != kCapturing) {
+ if (!client_)
return;
- }
- const size_t frame_size = VideoFrame::AllocationSize(
- VideoFrame::I420,
- gfx::Size(capture_format_.width, capture_format_.height));
+ const size_t frame_size =
+ VideoFrame::AllocationSize(VideoFrame::I420, capture_format_.frame_size);
memset(fake_frame_.get(), 0, frame_size);
SkBitmap bitmap;
bitmap.setConfig(SkBitmap::kA8_Config,
- capture_format_.width,
- capture_format_.height,
- capture_format_.width);
- bitmap.setPixels(fake_frame_.get());
+ capture_format_.frame_size.width(),
+ capture_format_.frame_size.height(),
+ capture_format_.frame_size.width()),
+ bitmap.setPixels(fake_frame_.get());
SkCanvas canvas(bitmap);
// Draw a sweeping circle to show an animation.
- int radius = std::min(capture_format_.width, capture_format_.height) / 4;
- SkRect rect = SkRect::MakeXYWH(
- capture_format_.width / 2 - radius, capture_format_.height / 2 - radius,
- 2 * radius, 2 * radius);
+ int radius = std::min(capture_format_.frame_size.width(),
+ capture_format_.frame_size.height()) /
+ 4;
+ SkRect rect =
+ SkRect::MakeXYWH(capture_format_.frame_size.width() / 2 - radius,
+ capture_format_.frame_size.height() / 2 - radius,
+ 2 * radius,
+ 2 * radius);
SkPaint paint;
paint.setStyle(SkPaint::kFill_Style);
@@ -209,12 +213,14 @@ void FakeVideoCaptureDevice::OnCaptureTask() {
frame_count_++;
- // Give the captured frame to the observer.
- observer_->OnIncomingCapturedFrame(
- fake_frame_.get(), frame_size, base::Time::Now(), 0, false, false);
+ // Give the captured frame to the client.
+ client_->OnIncomingCapturedFrame(fake_frame_.get(),
+ frame_size,
+ base::Time::Now(),
+ 0,
+ capture_format_);
if (!(frame_count_ % kFakeCaptureCapabilityChangePeriod) &&
- (capture_format_.frame_size_type ==
- VariableResolutionVideoCaptureDevice)) {
+ format_roster_.size() > 0U) {
Reallocate();
}
// Reschedule next CaptureTask.
@@ -225,33 +231,29 @@ void FakeVideoCaptureDevice::OnCaptureTask() {
base::TimeDelta::FromMilliseconds(kFakeCaptureTimeoutMs));
}
-void FakeVideoCaptureDevice::PopulateCapabilitiesRoster() {
- capabilities_roster_.push_back(
- media::VideoCaptureCapability(320,
- 240,
- 30,
- PIXEL_FORMAT_I420,
- 0,
- false,
- VariableResolutionVideoCaptureDevice));
- capabilities_roster_.push_back(
- media::VideoCaptureCapability(640,
- 480,
- 30,
- PIXEL_FORMAT_I420,
- 0,
- false,
- VariableResolutionVideoCaptureDevice));
- capabilities_roster_.push_back(
- media::VideoCaptureCapability(800,
- 600,
- 30,
- PIXEL_FORMAT_I420,
- 0,
- false,
- VariableResolutionVideoCaptureDevice));
-
- capabilities_roster_index_ = 0;
+void FakeVideoCaptureDevice::Reallocate() {
+ DCHECK_EQ(capture_thread_.message_loop(), base::MessageLoop::current());
+ capture_format_ =
+ format_roster_.at(++format_roster_index_ % format_roster_.size());
+ DCHECK_EQ(capture_format_.pixel_format, PIXEL_FORMAT_I420);
+ DVLOG(3) << "Reallocating FakeVideoCaptureDevice, new capture resolution "
+ << capture_format_.frame_size.ToString();
+
+ const size_t fake_frame_size =
+ VideoFrame::AllocationSize(VideoFrame::I420, capture_format_.frame_size);
+ fake_frame_.reset(new uint8[fake_frame_size]);
+}
+
+void FakeVideoCaptureDevice::PopulateFormatRoster() {
+ DCHECK_EQ(capture_thread_.message_loop(), base::MessageLoop::current());
+ format_roster_.push_back(
+ media::VideoCaptureFormat(gfx::Size(320, 240), 30, PIXEL_FORMAT_I420));
+ format_roster_.push_back(
+ media::VideoCaptureFormat(gfx::Size(640, 480), 30, PIXEL_FORMAT_I420));
+ format_roster_.push_back(
+ media::VideoCaptureFormat(gfx::Size(800, 600), 30, PIXEL_FORMAT_I420));
+
+ format_roster_index_ = 0;
}
} // namespace media
diff --git a/chromium/media/video/capture/fake_video_capture_device.h b/chromium/media/video/capture/fake_video_capture_device.h
index e8ab25567f7..399a68268fb 100644
--- a/chromium/media/video/capture/fake_video_capture_device.h
+++ b/chromium/media/video/capture/fake_video_capture_device.h
@@ -10,64 +10,70 @@
#include <string>
+#include "base/atomicops.h"
#include "base/memory/scoped_ptr.h"
#include "base/threading/thread.h"
+#include "base/threading/thread_checker.h"
#include "media/video/capture/video_capture_device.h"
namespace media {
-class MEDIA_EXPORT FakeVideoCaptureDevice : public VideoCaptureDevice1 {
+class MEDIA_EXPORT FakeVideoCaptureDevice : public VideoCaptureDevice {
public:
static VideoCaptureDevice* Create(const Name& device_name);
virtual ~FakeVideoCaptureDevice();
// Used for testing. This will make sure the next call to Create will
// return NULL;
static void SetFailNextCreate();
+ static void SetNumberOfFakeDevices(size_t number_of_devices);
+ static size_t NumberOfFakeDevices();
static void GetDeviceNames(Names* device_names);
+ static void GetDeviceSupportedFormats(const Name& device,
+ VideoCaptureFormats* supported_formats);
// VideoCaptureDevice implementation.
- virtual void Allocate(const VideoCaptureCapability& capture_format,
- VideoCaptureDevice::EventHandler* observer) OVERRIDE;
- virtual void Start() OVERRIDE;
- virtual void Stop() OVERRIDE;
- virtual void DeAllocate() OVERRIDE;
- virtual const Name& device_name() OVERRIDE;
+ virtual void AllocateAndStart(const VideoCaptureParams& params,
+ scoped_ptr<VideoCaptureDevice::Client> client)
+ OVERRIDE;
+ virtual void StopAndDeAllocate() OVERRIDE;
private:
- // Flag indicating the internal state.
- enum InternalState {
- kIdle,
- kAllocated,
- kCapturing,
- kError
- };
- explicit FakeVideoCaptureDevice(const Name& device_name);
-
- // Called on the capture_thread_.
- void OnCaptureTask();
+ FakeVideoCaptureDevice();
- // EXPERIMENTAL, similar to allocate, but changes resolution and calls
- // observer->OnFrameInfoChanged(VideoCaptureCapability&)
+ // Called on the |capture_thread_| only.
+ void OnAllocateAndStart(const VideoCaptureParams& params,
+ scoped_ptr<Client> client);
+ void OnStopAndDeAllocate();
+ void OnCaptureTask();
void Reallocate();
- void PopulateCapabilitiesRoster();
+ void PopulateFormatRoster();
+
+ // |thread_checker_| is used to check that destructor, AllocateAndStart() and
+ // StopAndDeAllocate() are called in the correct thread that owns the object.
+ base::ThreadChecker thread_checker_;
- Name device_name_;
- VideoCaptureDevice::EventHandler* observer_;
- InternalState state_;
base::Thread capture_thread_;
+ // The following members are only used on the |capture_thread_|.
+ scoped_ptr<VideoCaptureDevice::Client> client_;
scoped_ptr<uint8[]> fake_frame_;
int frame_count_;
- VideoCaptureCapability capture_format_;
+ VideoCaptureFormat capture_format_;
- // When the device is configured as mutating video captures, this vector
- // holds the available ones which are used in sequence, restarting at the end.
- std::vector<VideoCaptureCapability> capabilities_roster_;
- int capabilities_roster_index_;
+ // When the device is allowed to change resolution, this vector holds the
+ // available ones which are used in sequence, restarting at the end. These
+ // two members belong to and are only used in |capture_thread_|.
+ std::vector<VideoCaptureFormat> format_roster_;
+ int format_roster_index_;
static bool fail_next_create_;
+ // |number_of_devices_| is atomic since tests can call SetNumberOfFakeDevices
+ // on the IO thread to set |number_of_devices_|. The variable can be
+ // read from a separate thread.
+ // TODO(perkj): Make tests independent of global state. crbug/323913
+ static base::subtle::Atomic32 number_of_devices_;
- DISALLOW_IMPLICIT_CONSTRUCTORS(FakeVideoCaptureDevice);
+ DISALLOW_COPY_AND_ASSIGN(FakeVideoCaptureDevice);
};
} // namespace media
diff --git a/chromium/media/video/capture/file_video_capture_device.cc b/chromium/media/video/capture/file_video_capture_device.cc
new file mode 100644
index 00000000000..6f118d29e38
--- /dev/null
+++ b/chromium/media/video/capture/file_video_capture_device.cc
@@ -0,0 +1,300 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/video/capture/file_video_capture_device.h"
+
+#include <string>
+
+#include "base/bind.h"
+#include "base/command_line.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_piece.h"
+#include "base/strings/sys_string_conversions.h"
+#include "media/base/media_switches.h"
+
+
+namespace media {
+static const char kFileVideoCaptureDeviceName[] =
+ "/dev/placeholder-for-file-backed-fake-capture-device";
+
+static const int kY4MHeaderMaxSize = 200;
+static const char kY4MSimpleFrameDelimiter[] = "FRAME";
+static const int kY4MSimpleFrameDelimiterSize = 6;
+
+int ParseY4MInt(const base::StringPiece& token) {
+ int temp_int;
+ CHECK(base::StringToInt(token, &temp_int));
+ return temp_int;
+}
+
+// Extract numerator and denominator out of a token that must have the format
+// numerator:denominator, both integer numbers.
+void ParseY4MRational(const base::StringPiece& token,
+ int* numerator,
+ int* denominator) {
+ size_t index_divider = token.find(':');
+ CHECK_NE(index_divider, token.npos);
+ *numerator = ParseY4MInt(token.substr(0, index_divider));
+ *denominator = ParseY4MInt(token.substr(index_divider + 1, token.length()));
+ CHECK(*denominator);
+}
+
+// This function parses the ASCII string in |header| as belonging to a Y4M file,
+// returning the collected format in |video_format|. For a non authoritative
+// explanation of the header format, check
+// http://wiki.multimedia.cx/index.php?title=YUV4MPEG2
+// Restrictions: Only the I420 pixel format is supported; interlacing (except
+// mixed modes) and pixel aspect ratio are ignored.
+// Implementation notes: Y4M header should end with an ASCII 0x20 (whitespace)
+// character, however all examples mentioned in the Y4M header description end
+// with a newline character instead. Also, some headers do _not_ specify pixel
+// format, in this case it means I420.
+// This code was inspired by third_party/libvpx/.../y4minput.* .
+void ParseY4MTags(const std::string& file_header,
+ media::VideoCaptureFormat* video_format) {
+ video_format->pixel_format = media::PIXEL_FORMAT_I420;
+ video_format->frame_size.set_width(0);
+ video_format->frame_size.set_height(0);
+ size_t index = 0;
+ size_t blank_position = 0;
+ base::StringPiece token;
+ while ((blank_position = file_header.find_first_of("\n ", index)) !=
+ std::string::npos) {
+ // Every token is supposed to have an identifier letter and a bunch of
+ // information immediately after, which we extract into a |token| here.
+ token =
+ base::StringPiece(&file_header[index + 1], blank_position - index - 1);
+ CHECK(!token.empty());
+ switch (file_header[index]) {
+ case 'W':
+ video_format->frame_size.set_width(ParseY4MInt(token));
+ break;
+ case 'H':
+ video_format->frame_size.set_height(ParseY4MInt(token));
+ break;
+ case 'F': {
+ // If the token is "FRAME", it means we have finished with the header.
+ if (token[0] == 'R')
+ break;
+ int fps_numerator, fps_denominator;
+ ParseY4MRational(token, &fps_numerator, &fps_denominator);
+ video_format->frame_rate = fps_numerator / fps_denominator;
+ break;
+ }
+ case 'I':
+ // Interlacing is ignored, but we don't like mixed modes.
+ CHECK_NE(token[0], 'm');
+ break;
+ case 'A':
+ // Pixel aspect ratio ignored.
+ break;
+ case 'C':
+ CHECK_EQ(ParseY4MInt(token), 420); // Only I420 supported.
+ break;
+ default:
+ break;
+ }
+ // We're done if we have found a newline character right after the token.
+ if (file_header[blank_position] == '\n')
+ break;
+ index = blank_position + 1;
+ }
+ // Last video format semantic correctness check before sending it back.
+ CHECK(video_format->IsValid());
+}
+
+// Reads and parses the header of a Y4M |file|, returning the collected video
+// format in |video_format|. Returns the index of the first byte of the first
+// video frame.
+// Restrictions: Only trivial per-frame headers are supported.
+int64 ParseFileAndExtractVideoFormat(
+ const base::PlatformFile& file,
+ media::VideoCaptureFormat* video_format) {
+ std::string header(kY4MHeaderMaxSize, 0);
+ base::ReadPlatformFile(file, 0, &header[0], kY4MHeaderMaxSize - 1);
+
+ size_t header_end = header.find(kY4MSimpleFrameDelimiter);
+ CHECK_NE(header_end, header.npos);
+
+ ParseY4MTags(header, video_format);
+ return header_end + kY4MSimpleFrameDelimiterSize;
+}
+
+// Opens a given file for reading, and returns the file to the caller, who is
+// responsible for closing it.
+base::PlatformFile OpenFileForRead(const base::FilePath& file_path) {
+ base::PlatformFileError file_error;
+ base::PlatformFile file = base::CreatePlatformFile(
+ file_path,
+ base::PLATFORM_FILE_OPEN | base::PLATFORM_FILE_READ,
+ NULL,
+ &file_error);
+ CHECK_EQ(file_error, base::PLATFORM_FILE_OK);
+ return file;
+}
+
+// Inspects the command line and retrieves the file path parameter.
+base::FilePath GetFilePathFromCommandLine() {
+ base::FilePath command_line_file_path =
+ CommandLine::ForCurrentProcess()->GetSwitchValuePath(
+ switches::kUseFileForFakeVideoCapture);
+ CHECK(!command_line_file_path.empty());
+ return command_line_file_path;
+}
+
+void FileVideoCaptureDevice::GetDeviceNames(Names* const device_names) {
+ DCHECK(device_names->empty());
+ base::FilePath command_line_file_path = GetFilePathFromCommandLine();
+#if defined(OS_WIN)
+ device_names->push_back(
+ Name(base::SysWideToUTF8(command_line_file_path.value()),
+ kFileVideoCaptureDeviceName));
+#else
+ device_names->push_back(Name(command_line_file_path.value(),
+ kFileVideoCaptureDeviceName));
+#endif // OS_WIN
+}
+
+void FileVideoCaptureDevice::GetDeviceSupportedFormats(
+ const Name& device,
+ VideoCaptureFormats* supported_formats) {
+ base::PlatformFile file = OpenFileForRead(GetFilePathFromCommandLine());
+ VideoCaptureFormat capture_format;
+ ParseFileAndExtractVideoFormat(file, &capture_format);
+ supported_formats->push_back(capture_format);
+
+ CHECK(base::ClosePlatformFile(file));
+}
+
+VideoCaptureDevice* FileVideoCaptureDevice::Create(const Name& device_name) {
+#if defined(OS_WIN)
+ return new FileVideoCaptureDevice(
+ base::FilePath(base::SysUTF8ToWide(device_name.name())));
+#else
+ return new FileVideoCaptureDevice(base::FilePath(device_name.name()));
+#endif // OS_WIN
+}
+
+FileVideoCaptureDevice::FileVideoCaptureDevice(const base::FilePath& file_path)
+ : capture_thread_("CaptureThread"),
+ file_path_(file_path),
+ file_(base::kInvalidPlatformFileValue),
+ frame_size_(0),
+ current_byte_index_(0),
+ first_frame_byte_index_(0) {}
+
+FileVideoCaptureDevice::~FileVideoCaptureDevice() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ // Check if the thread is running.
+ // This means that the device has not been DeAllocated properly.
+ CHECK(!capture_thread_.IsRunning());
+}
+
+void FileVideoCaptureDevice::AllocateAndStart(
+ const VideoCaptureParams& params,
+ scoped_ptr<VideoCaptureDevice::Client> client) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ CHECK(!capture_thread_.IsRunning());
+
+ capture_thread_.Start();
+ capture_thread_.message_loop()->PostTask(
+ FROM_HERE,
+ base::Bind(&FileVideoCaptureDevice::OnAllocateAndStart,
+ base::Unretained(this),
+ params,
+ base::Passed(&client)));
+}
+
+void FileVideoCaptureDevice::StopAndDeAllocate() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ CHECK(capture_thread_.IsRunning());
+
+ capture_thread_.message_loop()->PostTask(
+ FROM_HERE,
+ base::Bind(&FileVideoCaptureDevice::OnStopAndDeAllocate,
+ base::Unretained(this)));
+ capture_thread_.Stop();
+}
+
+int FileVideoCaptureDevice::CalculateFrameSize() {
+ DCHECK_EQ(capture_format_.pixel_format, PIXEL_FORMAT_I420);
+ DCHECK_EQ(capture_thread_.message_loop(), base::MessageLoop::current());
+ return capture_format_.frame_size.GetArea() * 12 / 8;
+}
+
+void FileVideoCaptureDevice::OnAllocateAndStart(
+ const VideoCaptureParams& params,
+ scoped_ptr<VideoCaptureDevice::Client> client) {
+ DCHECK_EQ(capture_thread_.message_loop(), base::MessageLoop::current());
+
+ client_ = client.Pass();
+
+ // Open the file and parse the header. Get frame size and format.
+ DCHECK_EQ(file_, base::kInvalidPlatformFileValue);
+ file_ = OpenFileForRead(file_path_);
+ first_frame_byte_index_ =
+ ParseFileAndExtractVideoFormat(file_, &capture_format_);
+ current_byte_index_ = first_frame_byte_index_;
+ DVLOG(1) << "Opened video file " << capture_format_.frame_size.ToString()
+ << ", fps: " << capture_format_.frame_rate;
+
+ frame_size_ = CalculateFrameSize();
+ video_frame_.reset(new uint8[frame_size_]);
+
+ capture_thread_.message_loop()->PostTask(
+ FROM_HERE,
+ base::Bind(&FileVideoCaptureDevice::OnCaptureTask,
+ base::Unretained(this)));
+}
+
+void FileVideoCaptureDevice::OnStopAndDeAllocate() {
+ DCHECK_EQ(capture_thread_.message_loop(), base::MessageLoop::current());
+ CHECK(base::ClosePlatformFile(file_));
+ client_.reset();
+ current_byte_index_ = 0;
+ first_frame_byte_index_ = 0;
+ frame_size_ = 0;
+ video_frame_.reset();
+}
+
+void FileVideoCaptureDevice::OnCaptureTask() {
+ DCHECK_EQ(capture_thread_.message_loop(), base::MessageLoop::current());
+ if (!client_)
+ return;
+ int result =
+ base::ReadPlatformFile(file_,
+ current_byte_index_,
+ reinterpret_cast<char*>(video_frame_.get()),
+ frame_size_);
+
+ // If we read past the EOF, ReadPlatformFile will return 0 read bytes. In
+ // that case, reset the pointer and read again.
+ if (result != frame_size_) {
+ CHECK_EQ(result, 0);
+ current_byte_index_ = first_frame_byte_index_;
+ CHECK_EQ(base::ReadPlatformFile(file_,
+ current_byte_index_,
+ reinterpret_cast<char*>(video_frame_.get()),
+ frame_size_),
+ frame_size_);
+ } else {
+ current_byte_index_ += frame_size_ + kY4MSimpleFrameDelimiterSize;
+ }
+
+ // Give the captured frame to the client.
+ client_->OnIncomingCapturedFrame(video_frame_.get(),
+ frame_size_,
+ base::Time::Now(),
+ 0,
+ capture_format_);
+ // Reschedule next CaptureTask.
+ base::MessageLoop::current()->PostDelayedTask(
+ FROM_HERE,
+ base::Bind(&FileVideoCaptureDevice::OnCaptureTask,
+ base::Unretained(this)),
+ base::TimeDelta::FromSeconds(1) / capture_format_.frame_rate);
+}
+
+} // namespace media
diff --git a/chromium/media/video/capture/file_video_capture_device.h b/chromium/media/video/capture/file_video_capture_device.h
new file mode 100644
index 00000000000..06e6033254d
--- /dev/null
+++ b/chromium/media/video/capture/file_video_capture_device.h
@@ -0,0 +1,79 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_VIDEO_CAPTURE_FILE_VIDEO_CAPTURE_DEVICE_H_
+#define MEDIA_VIDEO_CAPTURE_FILE_VIDEO_CAPTURE_DEVICE_H_
+
+#include <string>
+
+#include "base/memory/scoped_ptr.h"
+#include "base/platform_file.h"
+#include "base/threading/thread.h"
+#include "base/threading/thread_checker.h"
+#include "media/video/capture/video_capture_device.h"
+
+namespace media {
+
+// Implementation of a VideoCaptureDevice class that reads from a file. Used for
+// testing the video capture pipeline when no real hardware is available. The
+// only supported file format is YUV4MPEG2 (a.k.a. Y4M), a minimal container
+// with a series of uncompressed video only frames, see the link
+// http://wiki.multimedia.cx/index.php?title=YUV4MPEG2 for more information
+// on the file format. Several restrictions and notes apply, see the
+// implementation file.
+// Example videos can be found in http://media.xiph.org/video/derf.
+class MEDIA_EXPORT FileVideoCaptureDevice : public VideoCaptureDevice {
+ public:
+ // VideoCaptureDevice implementation, static methods. Create() returns a
+ // pointer to the object, fully owned by the caller.
+ // TODO(mcasas): Create() should return a scoped_ptr<> http://crbug.com/321613
+ static VideoCaptureDevice* Create(const Name& device_name);
+ static void GetDeviceNames(Names* device_names);
+ static void GetDeviceSupportedFormats(const Name& device,
+ VideoCaptureFormats* supported_formats);
+
+ // VideoCaptureDevice implementation, class methods.
+ virtual ~FileVideoCaptureDevice();
+ virtual void AllocateAndStart(
+ const VideoCaptureParams& params,
+ scoped_ptr<VideoCaptureDevice::Client> client) OVERRIDE;
+ virtual void StopAndDeAllocate() OVERRIDE;
+
+ private:
+ // Constructor of the class, with a fully qualified file path as input, which
+ // represents the Y4M video file to stream repeatedly.
+ explicit FileVideoCaptureDevice(const base::FilePath& file_path);
+ // Returns size in bytes of an I420 frame, not including possible paddings,
+ // defined by |capture_format_|.
+ int CalculateFrameSize();
+
+ // Called on the |capture_thread_|.
+ void OnAllocateAndStart(const VideoCaptureParams& params,
+ scoped_ptr<Client> client);
+ void OnStopAndDeAllocate();
+ void OnCaptureTask();
+
+ // |thread_checker_| is used to check that the destructor, AllocateAndStart()
+ // and StopAndDeAllocate() are called on the thread that owns the object.
+ base::ThreadChecker thread_checker_;
+
+ // |capture_thread_| is used for internal operations via posting tasks to it.
+ // It is active between OnAllocateAndStart() and OnStopAndDeAllocate().
+ base::Thread capture_thread_;
+ // The following members belong to |capture_thread_|.
+ scoped_ptr<VideoCaptureDevice::Client> client_;
+ const base::FilePath file_path_;
+ base::PlatformFile file_;
+ scoped_ptr<uint8[]> video_frame_;
+ VideoCaptureFormat capture_format_;
+ int frame_size_;
+ int64 current_byte_index_;
+ int64 first_frame_byte_index_;
+
+ DISALLOW_COPY_AND_ASSIGN(FileVideoCaptureDevice);
+};
+
+} // namespace media
+
+#endif // MEDIA_VIDEO_CAPTURE_FILE_VIDEO_CAPTURE_DEVICE_H_
diff --git a/chromium/media/video/capture/linux/video_capture_device_linux.cc b/chromium/media/video/capture/linux/video_capture_device_linux.cc
index fdd52772cb1..21f57ee132a 100644
--- a/chromium/media/video/capture/linux/video_capture_device_linux.cc
+++ b/chromium/media/video/capture/linux/video_capture_device_linux.cc
@@ -45,7 +45,7 @@ static const int32 kV4l2RawFmts[] = {
V4L2_PIX_FMT_YUYV
};
-// USB VID and PID are both 4 bytes long
+// USB VID and PID are both 4 bytes long.
static const size_t kVidPidSize = 4;
// /sys/class/video4linux/video{N}/device is a symlink to the corresponding
@@ -55,6 +55,8 @@ static const char kVidPathTemplate[] =
static const char kPidPathTemplate[] =
"/sys/class/video4linux/%s/device/../idProduct";
+// This function translates Video4Linux pixel formats to Chromium pixel
+// formats; it should only support those listed in GetListOfUsableFourCCs.
static VideoPixelFormat V4l2ColorToVideoCaptureColorFormat(
int32 v4l2_fourcc) {
VideoPixelFormat result = PIXEL_FORMAT_UNKNOWN;
@@ -69,8 +71,9 @@ static VideoPixelFormat V4l2ColorToVideoCaptureColorFormat(
case V4L2_PIX_FMT_JPEG:
result = PIXEL_FORMAT_MJPEG;
break;
+ default:
+ DVLOG(1) << "Unsupported pixel format " << std::hex << v4l2_fourcc;
}
- DCHECK_NE(result, PIXEL_FORMAT_UNKNOWN);
return result;
}
@@ -141,6 +144,76 @@ void VideoCaptureDevice::GetDeviceNames(Names* device_names) {
}
}
+void VideoCaptureDevice::GetDeviceSupportedFormats(
+ const Name& device,
+ VideoCaptureFormats* supported_formats) {
+ if (device.id().empty())
+ return;
+ int fd;
+ if ((fd = open(device.id().c_str(), O_RDONLY)) < 0)
+ return;
+
+ supported_formats->clear();
+ // Retrieve the caps one by one, first get pixel format, then sizes, then
+ // frame rates. See http://linuxtv.org/downloads/v4l-dvb-apis for reference.
+ v4l2_fmtdesc pixel_format = {};
+ pixel_format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ while (ioctl(fd, VIDIOC_ENUM_FMT, &pixel_format) == 0) {
+ VideoCaptureFormat supported_format;
+ supported_format.pixel_format =
+ V4l2ColorToVideoCaptureColorFormat((int32)pixel_format.pixelformat);
+ if (supported_format.pixel_format == PIXEL_FORMAT_UNKNOWN) {
+ ++pixel_format.index;
+ continue;
+ }
+
+ v4l2_frmsizeenum frame_size = {};
+ frame_size.pixel_format = pixel_format.pixelformat;
+ while (ioctl(fd, VIDIOC_ENUM_FRAMESIZES, &frame_size) == 0) {
+ if (frame_size.type == V4L2_FRMSIZE_TYPE_DISCRETE) {
+ supported_format.frame_size.SetSize(
+ frame_size.discrete.width, frame_size.discrete.height);
+ } else if (frame_size.type == V4L2_FRMSIZE_TYPE_STEPWISE) {
+ // TODO(mcasas): see http://crbug.com/249953, support these devices.
+ NOTIMPLEMENTED();
+ } else if (frame_size.type == V4L2_FRMSIZE_TYPE_CONTINUOUS) {
+ // TODO(mcasas): see http://crbug.com/249953, support these devices.
+ NOTIMPLEMENTED();
+ }
+ v4l2_frmivalenum frame_interval = {};
+ frame_interval.pixel_format = pixel_format.pixelformat;
+ frame_interval.width = frame_size.discrete.width;
+ frame_interval.height = frame_size.discrete.height;
+ while (ioctl(fd, VIDIOC_ENUM_FRAMEINTERVALS, &frame_interval) == 0) {
+ if (frame_interval.type == V4L2_FRMIVAL_TYPE_DISCRETE) {
+ if (frame_interval.discrete.numerator != 0) {
+ supported_format.frame_rate =
+ static_cast<float>(frame_interval.discrete.denominator) /
+ static_cast<float>(frame_interval.discrete.numerator);
+ } else {
+ supported_format.frame_rate = 0;
+ }
+ } else if (frame_interval.type == V4L2_FRMIVAL_TYPE_CONTINUOUS) {
+ // TODO(mcasas): see http://crbug.com/249953, support these devices.
+ NOTIMPLEMENTED();
+ break;
+ } else if (frame_interval.type == V4L2_FRMIVAL_TYPE_STEPWISE) {
+ // TODO(mcasas): see http://crbug.com/249953, support these devices.
+ NOTIMPLEMENTED();
+ break;
+ }
+ supported_formats->push_back(supported_format);
+ ++frame_interval.index;
+ }
+ ++frame_size.index;
+ }
+ ++pixel_format.index;
+ }
+
+ close(fd);
+ return;
+}
+
static bool ReadIdFile(const std::string path, std::string* id) {
char id_buf[kVidPidSize];
FILE* file = fopen(path.c_str(), "rb");
@@ -196,14 +269,12 @@ VideoCaptureDevice* VideoCaptureDevice::Create(const Name& device_name) {
VideoCaptureDeviceLinux::VideoCaptureDeviceLinux(const Name& device_name)
: state_(kIdle),
- observer_(NULL),
device_name_(device_name),
device_fd_(-1),
v4l2_thread_("V4L2Thread"),
buffer_pool_(NULL),
buffer_pool_size_(0),
- timeout_count_(0) {
-}
+ timeout_count_(0) {}
VideoCaptureDeviceLinux::~VideoCaptureDeviceLinux() {
state_ = kIdle;
@@ -217,68 +288,45 @@ VideoCaptureDeviceLinux::~VideoCaptureDeviceLinux() {
}
}
-void VideoCaptureDeviceLinux::Allocate(
- const VideoCaptureCapability& capture_format,
- EventHandler* observer) {
+void VideoCaptureDeviceLinux::AllocateAndStart(
+ const VideoCaptureParams& params,
+ scoped_ptr<VideoCaptureDevice::Client> client) {
if (v4l2_thread_.IsRunning()) {
return; // Wrong state.
}
v4l2_thread_.Start();
- v4l2_thread_.message_loop()
- ->PostTask(FROM_HERE,
- base::Bind(&VideoCaptureDeviceLinux::OnAllocate,
- base::Unretained(this),
- capture_format.width,
- capture_format.height,
- capture_format.frame_rate,
- observer));
-}
-
-void VideoCaptureDeviceLinux::Start() {
- if (!v4l2_thread_.IsRunning()) {
- return; // Wrong state.
- }
- v4l2_thread_.message_loop()->PostTask(
- FROM_HERE,
- base::Bind(&VideoCaptureDeviceLinux::OnStart, base::Unretained(this)));
-}
-
-void VideoCaptureDeviceLinux::Stop() {
- if (!v4l2_thread_.IsRunning()) {
- return; // Wrong state.
- }
v4l2_thread_.message_loop()->PostTask(
FROM_HERE,
- base::Bind(&VideoCaptureDeviceLinux::OnStop, base::Unretained(this)));
+ base::Bind(&VideoCaptureDeviceLinux::OnAllocateAndStart,
+ base::Unretained(this),
+ params.requested_format.frame_size.width(),
+ params.requested_format.frame_size.height(),
+ params.requested_format.frame_rate,
+ base::Passed(&client)));
}
-void VideoCaptureDeviceLinux::DeAllocate() {
+void VideoCaptureDeviceLinux::StopAndDeAllocate() {
if (!v4l2_thread_.IsRunning()) {
return; // Wrong state.
}
v4l2_thread_.message_loop()->PostTask(
FROM_HERE,
- base::Bind(&VideoCaptureDeviceLinux::OnDeAllocate,
+ base::Bind(&VideoCaptureDeviceLinux::OnStopAndDeAllocate,
base::Unretained(this)));
v4l2_thread_.Stop();
-
// Make sure no buffers are still allocated.
// This can happen (theoretically) if an error occurs when trying to stop
// the camera.
DeAllocateVideoBuffers();
}
-const VideoCaptureDevice::Name& VideoCaptureDeviceLinux::device_name() {
- return device_name_;
-}
-
-void VideoCaptureDeviceLinux::OnAllocate(int width,
- int height,
- int frame_rate,
- EventHandler* observer) {
+void VideoCaptureDeviceLinux::OnAllocateAndStart(int width,
+ int height,
+ int frame_rate,
+ scoped_ptr<Client> client) {
DCHECK_EQ(v4l2_thread_.message_loop(), base::MessageLoop::current());
- observer_ = observer;
+ client_ = client.Pass();
// Need to open camera with O_RDWR after Linux kernel 3.3.
if ((device_fd_ = open(device_name_.id().c_str(), O_RDWR)) < 0) {
@@ -359,46 +407,13 @@ void VideoCaptureDeviceLinux::OnAllocate(int width,
// framerate configuration, or the actual one is different from the desired?
// Store our current width and height.
- VideoCaptureCapability current_settings;
- current_settings.color = V4l2ColorToVideoCaptureColorFormat(
- video_fmt.fmt.pix.pixelformat);
- current_settings.width = video_fmt.fmt.pix.width;
- current_settings.height = video_fmt.fmt.pix.height;
- current_settings.frame_rate = frame_rate;
- current_settings.expected_capture_delay = 0;
- current_settings.interlaced = false;
-
- state_ = kAllocated;
- // Report the resulting frame size to the observer.
- observer_->OnFrameInfo(current_settings);
-}
-
-void VideoCaptureDeviceLinux::OnDeAllocate() {
- DCHECK_EQ(v4l2_thread_.message_loop(), base::MessageLoop::current());
-
- // If we are in error state or capturing
- // try to stop the camera.
- if (state_ == kCapturing) {
- OnStop();
- }
- if (state_ == kAllocated) {
- state_ = kIdle;
- }
-
- // We need to close and open the device if we want to change the settings
- // Otherwise VIDIOC_S_FMT will return error
- // Sad but true.
- close(device_fd_);
- device_fd_ = -1;
-}
-
-void VideoCaptureDeviceLinux::OnStart() {
- DCHECK_EQ(v4l2_thread_.message_loop(), base::MessageLoop::current());
-
- if (state_ != kAllocated) {
- return;
- }
+ capture_format_.frame_size.SetSize(video_fmt.fmt.pix.width,
+ video_fmt.fmt.pix.height);
+ capture_format_.frame_rate = frame_rate;
+ capture_format_.pixel_format =
+ V4l2ColorToVideoCaptureColorFormat(video_fmt.fmt.pix.pixelformat);
+ // Start capturing.
if (!AllocateVideoBuffers()) {
// Error, We can not recover.
SetErrorState("Allocate buffer failed");
@@ -420,11 +435,9 @@ void VideoCaptureDeviceLinux::OnStart() {
base::Unretained(this)));
}
-void VideoCaptureDeviceLinux::OnStop() {
+void VideoCaptureDeviceLinux::OnStopAndDeAllocate() {
DCHECK_EQ(v4l2_thread_.message_loop(), base::MessageLoop::current());
- state_ = kAllocated;
-
v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
if (ioctl(device_fd_, VIDIOC_STREAMOFF, &type) < 0) {
SetErrorState("VIDIOC_STREAMOFF failed");
@@ -433,6 +446,14 @@ void VideoCaptureDeviceLinux::OnStop() {
// We don't dare to deallocate the buffers if we can't stop
// the capture device.
DeAllocateVideoBuffers();
+
+ // We need to close and reopen the device if we want to change the settings;
+ // otherwise VIDIOC_S_FMT will return an error.
+ // Sad but true.
+ close(device_fd_);
+ device_fd_ = -1;
+ state_ = kIdle;
+ client_.reset();
}
void VideoCaptureDeviceLinux::OnCaptureTask() {
@@ -488,9 +509,12 @@ void VideoCaptureDeviceLinux::OnCaptureTask() {
buffer.memory = V4L2_MEMORY_MMAP;
// Dequeue a buffer.
if (ioctl(device_fd_, VIDIOC_DQBUF, &buffer) == 0) {
- observer_->OnIncomingCapturedFrame(
- static_cast<uint8*> (buffer_pool_[buffer.index].start),
- buffer.bytesused, base::Time::Now(), 0, false, false);
+ client_->OnIncomingCapturedFrame(
+ static_cast<uint8*>(buffer_pool_[buffer.index].start),
+ buffer.bytesused,
+ base::Time::Now(),
+ 0,
+ capture_format_);
// Enqueue the buffer again.
if (ioctl(device_fd_, VIDIOC_QBUF, &buffer) == -1) {
@@ -581,9 +605,11 @@ void VideoCaptureDeviceLinux::DeAllocateVideoBuffers() {
}
void VideoCaptureDeviceLinux::SetErrorState(const std::string& reason) {
+ DCHECK(!v4l2_thread_.IsRunning() ||
+ v4l2_thread_.message_loop() == base::MessageLoop::current());
DVLOG(1) << reason;
state_ = kError;
- observer_->OnError();
+ client_->OnError();
}
} // namespace media
diff --git a/chromium/media/video/capture/linux/video_capture_device_linux.h b/chromium/media/video/capture/linux/video_capture_device_linux.h
index aab61aed77b..a5917b71f12 100644
--- a/chromium/media/video/capture/linux/video_capture_device_linux.h
+++ b/chromium/media/video/capture/linux/video_capture_device_linux.h
@@ -18,23 +18,20 @@
namespace media {
-class VideoCaptureDeviceLinux : public VideoCaptureDevice1 {
+class VideoCaptureDeviceLinux : public VideoCaptureDevice {
public:
explicit VideoCaptureDeviceLinux(const Name& device_name);
virtual ~VideoCaptureDeviceLinux();
// VideoCaptureDevice implementation.
- virtual void Allocate(const VideoCaptureCapability& capture_format,
- EventHandler* observer) OVERRIDE;
- virtual void Start() OVERRIDE;
- virtual void Stop() OVERRIDE;
- virtual void DeAllocate() OVERRIDE;
- virtual const Name& device_name() OVERRIDE;
+ virtual void AllocateAndStart(const VideoCaptureParams& params,
+ scoped_ptr<Client> client) OVERRIDE;
+
+ virtual void StopAndDeAllocate() OVERRIDE;
private:
enum InternalState {
kIdle, // The device driver is opened but camera is not in use.
- kAllocated, // The camera has been allocated and can be started.
kCapturing, // Video is being captured.
kError // Error accessing HW functions.
// User needs to recover by destroying the object.
@@ -48,13 +45,11 @@ class VideoCaptureDeviceLinux : public VideoCaptureDevice1 {
};
// Called on the v4l2_thread_.
- void OnAllocate(int width,
- int height,
- int frame_rate,
- EventHandler* observer);
- void OnStart();
- void OnStop();
- void OnDeAllocate();
+ void OnAllocateAndStart(int width,
+ int height,
+ int frame_rate,
+ scoped_ptr<Client> client);
+ void OnStopAndDeAllocate();
void OnCaptureTask();
bool AllocateVideoBuffers();
@@ -62,13 +57,14 @@ class VideoCaptureDeviceLinux : public VideoCaptureDevice1 {
void SetErrorState(const std::string& reason);
InternalState state_;
- VideoCaptureDevice::EventHandler* observer_;
+ scoped_ptr<VideoCaptureDevice::Client> client_;
Name device_name_;
int device_fd_; // File descriptor for the opened camera device.
base::Thread v4l2_thread_; // Thread used for reading data from the device.
Buffer* buffer_pool_;
int buffer_pool_size_; // Number of allocated buffers.
int timeout_count_;
+ VideoCaptureFormat capture_format_;
DISALLOW_IMPLICIT_CONSTRUCTORS(VideoCaptureDeviceLinux);
};
diff --git a/chromium/media/video/capture/mac/avfoundation_glue.h b/chromium/media/video/capture/mac/avfoundation_glue.h
new file mode 100644
index 00000000000..f9b23a2c240
--- /dev/null
+++ b/chromium/media/video/capture/mac/avfoundation_glue.h
@@ -0,0 +1,158 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// AVFoundation API is only introduced in Mac OS X > 10.6, and there is only one
+// build of Chromium, so the (potential) linking with AVFoundation has to happen
+// in runtime. For this to be clean, an AVFoundationGlue class is defined to try
+// and load these AVFoundation system libraries. If it succeeds, subsequent
+// clients can use AVFoundation via the rest of the classes declared in this
+// file.
+
+#ifndef MEDIA_VIDEO_CAPTURE_MAC_AVFOUNDATION_GLUE_H_
+#define MEDIA_VIDEO_CAPTURE_MAC_AVFOUNDATION_GLUE_H_
+
+#import <Foundation/Foundation.h>
+
+#include "base/basictypes.h"
+#include "media/base/media_export.h"
+#import "media/video/capture/mac/coremedia_glue.h"
+
+class MEDIA_EXPORT AVFoundationGlue {
+ public:
+ // This method returns true if the OS version supports AVFoundation and the
+ // AVFoundation bundle could be loaded correctly, or false otherwise.
+ static bool IsAVFoundationSupported();
+
+ static NSBundle const* AVFoundationBundle();
+
+ static void* AVFoundationLibraryHandle();
+
+ // Originally coming from AVCaptureDevice.h but in global namespace.
+ static NSString* AVCaptureDeviceWasConnectedNotification();
+ static NSString* AVCaptureDeviceWasDisconnectedNotification();
+
+ // Originally coming from AVMediaFormat.h but in global namespace.
+ static NSString* AVMediaTypeVideo();
+ static NSString* AVMediaTypeAudio();
+ static NSString* AVMediaTypeMuxed();
+
+ // Originally from AVCaptureSession.h but in global namespace.
+ static NSString* AVCaptureSessionRuntimeErrorNotification();
+ static NSString* AVCaptureSessionDidStopRunningNotification();
+ static NSString* AVCaptureSessionErrorKey();
+ static NSString* AVCaptureSessionPreset320x240();
+ static NSString* AVCaptureSessionPreset640x480();
+ static NSString* AVCaptureSessionPreset1280x720();
+
+ // Originally from AVVideoSettings.h but in global namespace.
+ static NSString* AVVideoScalingModeKey();
+ static NSString* AVVideoScalingModeResizeAspect();
+
+ static Class AVCaptureSessionClass();
+ static Class AVCaptureVideoDataOutputClass();
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(AVFoundationGlue);
+};
+
+// Originally AVCaptureDevice and coming from AVCaptureDevice.h
+MEDIA_EXPORT
+@interface CrAVCaptureDevice : NSObject
+
+- (BOOL)hasMediaType:(NSString*)mediaType;
+- (NSString*)uniqueID;
+- (NSString*)localizedName;
+- (BOOL)supportsAVCaptureSessionPreset:(NSString*)preset;
+
+@end
+
+MEDIA_EXPORT
+@interface CrAVCaptureInput // Originally from AVCaptureInput.h.
+@end
+
+MEDIA_EXPORT
+@interface CrAVCaptureOutput // Originally from AVCaptureOutput.h.
+@end
+
+// Originally AVCaptureSession and coming from AVCaptureSession.h.
+MEDIA_EXPORT
+@interface CrAVCaptureSession : NSObject
+
+- (void)release;
+- (BOOL)canSetSessionPreset:(NSString*)preset;
+- (void)setSessionPreset:(NSString*)preset;
+- (NSString*)sessionPreset;
+- (void)addInput:(CrAVCaptureInput*)input;
+- (void)removeInput:(CrAVCaptureInput*)input;
+- (void)addOutput:(CrAVCaptureOutput*)output;
+- (void)removeOutput:(CrAVCaptureOutput*)output;
+- (BOOL)isRunning;
+- (void)startRunning;
+- (void)stopRunning;
+
+@end
+
+// Originally AVCaptureConnection and coming from AVCaptureSession.h.
+MEDIA_EXPORT
+@interface CrAVCaptureConnection : NSObject
+
+- (BOOL)isVideoMinFrameDurationSupported;
+- (void)setVideoMinFrameDuration:(CoreMediaGlue::CMTime)minFrameRate;
+- (BOOL)isVideoMaxFrameDurationSupported;
+- (void)setVideoMaxFrameDuration:(CoreMediaGlue::CMTime)maxFrameRate;
+
+@end
+
+// Originally AVCaptureDeviceInput and coming from AVCaptureInput.h.
+MEDIA_EXPORT
+@interface CrAVCaptureDeviceInput : CrAVCaptureInput
+
+@end
+
+// Originally AVCaptureVideoDataOutputSampleBufferDelegate from
+// AVCaptureOutput.h.
+@protocol CrAVCaptureVideoDataOutputSampleBufferDelegate <NSObject>
+
+@optional
+
+- (void)captureOutput:(CrAVCaptureOutput*)captureOutput
+didOutputSampleBuffer:(CoreMediaGlue::CMSampleBufferRef)sampleBuffer
+ fromConnection:(CrAVCaptureConnection*)connection;
+
+@end
+
+// Originally AVCaptureVideoDataOutput and coming from AVCaptureOutput.h.
+MEDIA_EXPORT
+@interface CrAVCaptureVideoDataOutput : CrAVCaptureOutput
+
+- (oneway void)release;
+- (void)setSampleBufferDelegate:(id)sampleBufferDelegate
+ queue:(dispatch_queue_t)sampleBufferCallbackQueue;
+
+- (void)setVideoSettings:(NSDictionary*)videoSettings;
+- (NSDictionary*)videoSettings;
+- (CrAVCaptureConnection*)connectionWithMediaType:(NSString*)mediaType;
+
+@end
+
+// Class to provide access to class methods of AVCaptureDevice.
+MEDIA_EXPORT
+@interface AVCaptureDeviceGlue : NSObject
+
++ (NSArray*)devices;
+
++ (CrAVCaptureDevice*)deviceWithUniqueID:(NSString*)deviceUniqueID;
+
+@end
+
+// Class to provide access to class methods of AVCaptureDeviceInput.
+MEDIA_EXPORT
+@interface AVCaptureDeviceInputGlue : NSObject
+
++ (CrAVCaptureDeviceInput*)deviceInputWithDevice:(CrAVCaptureDevice*)device
+ error:(NSError**)outError;
+
+@end
+
+#endif // MEDIA_VIDEO_CAPTURE_MAC_AVFOUNDATION_GLUE_H_
diff --git a/chromium/media/video/capture/mac/avfoundation_glue.mm b/chromium/media/video/capture/mac/avfoundation_glue.mm
new file mode 100644
index 00000000000..1610d0f104a
--- /dev/null
+++ b/chromium/media/video/capture/mac/avfoundation_glue.mm
@@ -0,0 +1,161 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import "media/video/capture/mac/avfoundation_glue.h"
+
+#include <dlfcn.h>
+
+#include "base/command_line.h"
+#include "base/lazy_instance.h"
+#include "base/mac/mac_util.h"
+#include "media/base/media_switches.h"
+
+namespace {
+
+// This class is used to retrieve AVFoundation NSBundle and library handle. It
+// must be used as a LazyInstance so that it is initialised once and in a
+// thread-safe way. Normally no work is done in constructors: LazyInstance is
+// an exception.
+class AVFoundationInternal {
+ public:
+ AVFoundationInternal() {
+ bundle_ = [NSBundle
+ bundleWithPath:@"/System/Library/Frameworks/AVFoundation.framework"];
+
+ const char* path = [[bundle_ executablePath] fileSystemRepresentation];
+ CHECK(path);
+ library_handle_ = dlopen(path, RTLD_LAZY | RTLD_LOCAL);
+ CHECK(library_handle_) << dlerror();
+ }
+ NSBundle* bundle() const { return bundle_; }
+ void* library_handle() const { return library_handle_; }
+
+ private:
+ NSBundle* bundle_;
+ void* library_handle_;
+
+ DISALLOW_COPY_AND_ASSIGN(AVFoundationInternal);
+};
+
+} // namespace
+
+static base::LazyInstance<AVFoundationInternal> g_avfoundation_handle =
+ LAZY_INSTANCE_INITIALIZER;
+
+namespace media {
+
+// TODO(mcasas):http://crbug.com/323536 cache the string pointers.
+static NSString* ReadNSStringPtr(const char* symbol) {
+ NSString** string_pointer = reinterpret_cast<NSString**>(
+ dlsym(AVFoundationGlue::AVFoundationLibraryHandle(), symbol));
+ DCHECK(string_pointer) << dlerror();
+ return *string_pointer;
+}
+
+} // namespace media
+
+bool AVFoundationGlue::IsAVFoundationSupported() {
+ const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
+ return cmd_line->HasSwitch(switches::kEnableAVFoundation) &&
+ base::mac::IsOSLionOrLater() && [AVFoundationBundle() load];
+}
+
+NSBundle const* AVFoundationGlue::AVFoundationBundle() {
+ return g_avfoundation_handle.Get().bundle();
+}
+
+void* AVFoundationGlue::AVFoundationLibraryHandle() {
+ return g_avfoundation_handle.Get().library_handle();
+}
+
+NSString* AVFoundationGlue::AVCaptureDeviceWasConnectedNotification() {
+ return media::ReadNSStringPtr("AVCaptureDeviceWasConnectedNotification");
+}
+
+NSString* AVFoundationGlue::AVCaptureDeviceWasDisconnectedNotification() {
+ return media::ReadNSStringPtr("AVCaptureDeviceWasDisconnectedNotification");
+}
+
+NSString* AVFoundationGlue::AVMediaTypeVideo() {
+ return media::ReadNSStringPtr("AVMediaTypeVideo");
+}
+
+NSString* AVFoundationGlue::AVMediaTypeAudio() {
+ return media::ReadNSStringPtr("AVMediaTypeAudio");
+}
+
+NSString* AVFoundationGlue::AVMediaTypeMuxed() {
+ return media::ReadNSStringPtr("AVMediaTypeMuxed");
+}
+
+NSString* AVFoundationGlue::AVCaptureSessionRuntimeErrorNotification() {
+ return media::ReadNSStringPtr("AVCaptureSessionRuntimeErrorNotification");
+}
+
+NSString* AVFoundationGlue::AVCaptureSessionDidStopRunningNotification() {
+ return media::ReadNSStringPtr("AVCaptureSessionDidStopRunningNotification");
+}
+
+NSString* AVFoundationGlue::AVCaptureSessionErrorKey() {
+ return media::ReadNSStringPtr("AVCaptureSessionErrorKey");
+}
+
+NSString* AVFoundationGlue::AVCaptureSessionPreset320x240() {
+ return media::ReadNSStringPtr("AVCaptureSessionPreset320x240");
+}
+
+NSString* AVFoundationGlue::AVCaptureSessionPreset640x480() {
+ return media::ReadNSStringPtr("AVCaptureSessionPreset640x480");
+}
+
+NSString* AVFoundationGlue::AVCaptureSessionPreset1280x720() {
+ return media::ReadNSStringPtr("AVCaptureSessionPreset1280x720");
+}
+
+NSString* AVFoundationGlue::AVVideoScalingModeKey() {
+ return media::ReadNSStringPtr("AVVideoScalingModeKey");
+}
+
+NSString* AVFoundationGlue::AVVideoScalingModeResizeAspect() {
+ return media::ReadNSStringPtr("AVVideoScalingModeResizeAspect");
+}
+
+Class AVFoundationGlue::AVCaptureSessionClass() {
+ return [AVFoundationBundle() classNamed:@"AVCaptureSession"];
+}
+
+Class AVFoundationGlue::AVCaptureVideoDataOutputClass() {
+ return [AVFoundationBundle() classNamed:@"AVCaptureVideoDataOutput"];
+}
+
+@implementation AVCaptureDeviceGlue
+
++ (NSArray*)devices {
+ Class avcClass =
+ [AVFoundationGlue::AVFoundationBundle() classNamed:@"AVCaptureDevice"];
+ if ([avcClass respondsToSelector:@selector(devices)]) {
+ return [avcClass performSelector:@selector(devices)];
+ }
+ return nil;
+}
+
++ (CrAVCaptureDevice*)deviceWithUniqueID:(NSString*)deviceUniqueID {
+ Class avcClass =
+ [AVFoundationGlue::AVFoundationBundle() classNamed:@"AVCaptureDevice"];
+ return [avcClass performSelector:@selector(deviceWithUniqueID:)
+ withObject:deviceUniqueID];
+}
+
+@end // @implementation AVCaptureDeviceGlue
+
+@implementation AVCaptureDeviceInputGlue
+
++ (CrAVCaptureDeviceInput*)deviceInputWithDevice:(CrAVCaptureDevice*)device
+ error:(NSError**)outError {
+ return [[AVFoundationGlue::AVFoundationBundle()
+ classNamed:@"AVCaptureDeviceInput"] deviceInputWithDevice:device
+ error:outError];
+}
+
+@end // @implementation AVCaptureDeviceInputGlue
diff --git a/chromium/media/video/capture/mac/coremedia_glue.h b/chromium/media/video/capture/mac/coremedia_glue.h
new file mode 100644
index 00000000000..a1f21eb1480
--- /dev/null
+++ b/chromium/media/video/capture/mac/coremedia_glue.h
@@ -0,0 +1,46 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_VIDEO_CAPTURE_MAC_COREMEDIA_GLUE_H_
+#define MEDIA_VIDEO_CAPTURE_MAC_COREMEDIA_GLUE_H_
+
+#import <CoreVideo/CoreVideo.h>
+#import <Foundation/Foundation.h>
+
+#include "base/basictypes.h"
+#include "media/base/media_export.h"
+
+// CoreMedia API is only introduced in Mac OS X > 10.6, the (potential) linking
+// with it has to happen in runtime. If it succeeds, subsequent clients can use
+// CoreMedia via the class declared in this file, where the original naming has
+// been kept as much as possible.
+class MEDIA_EXPORT CoreMediaGlue {
+ public:
+ // Originally from CMTime.h
+ typedef int64_t CMTimeValue;
+ typedef int32_t CMTimeScale;
+ typedef int64_t CMTimeEpoch;
+ typedef uint32_t CMTimeFlags;
+ typedef struct {
+ CMTimeValue value;
+ CMTimeScale timescale;
+ CMTimeFlags flags;
+ CMTimeEpoch epoch;
+ } CMTime;
+
+ // Originally from CMSampleBuffer.h.
+ typedef struct OpaqueCMSampleBuffer* CMSampleBufferRef;
+
+ // Originally from CMTime.h.
+ static CMTime CMTimeMake(int64_t value, int32_t timescale);
+
+ // Originally from CMSampleBuffer.h.
+ static CVImageBufferRef CMSampleBufferGetImageBuffer(
+ CMSampleBufferRef buffer);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(CoreMediaGlue);
+};
+
+#endif // MEDIA_VIDEO_CAPTURE_MAC_COREMEDIA_GLUE_H_
diff --git a/chromium/media/video/capture/mac/coremedia_glue.mm b/chromium/media/video/capture/mac/coremedia_glue.mm
new file mode 100644
index 00000000000..f94256b6c92
--- /dev/null
+++ b/chromium/media/video/capture/mac/coremedia_glue.mm
@@ -0,0 +1,70 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/video/capture/mac/coremedia_glue.h"
+
+#include <dlfcn.h>
+
+#include "base/logging.h"
+#include "base/lazy_instance.h"
+
+namespace {
+
+// This class is used to retrieve some CoreMedia library functions. It must be
+// used as a LazyInstance so that it is initialised once and in a thread-safe
+// way. Normally no work is done in constructors: LazyInstance is an exception.
+class CoreMediaLibraryInternal {
+ public:
+ typedef CoreMediaGlue::CMTime (*CMTimeMakeMethod)(int64_t, int32_t);
+ typedef CVImageBufferRef (*CMSampleBufferGetImageBufferMethod)(
+ CoreMediaGlue::CMSampleBufferRef);
+
+ CoreMediaLibraryInternal() {
+ NSBundle* bundle = [NSBundle
+ bundleWithPath:@"/System/Library/Frameworks/CoreMedia.framework"];
+
+ const char* path = [[bundle executablePath] fileSystemRepresentation];
+ CHECK(path);
+ void* library_handle = dlopen(path, RTLD_LAZY | RTLD_LOCAL);
+ CHECK(library_handle) << dlerror();
+
+ // Now extract the methods.
+ cm_time_make_ = reinterpret_cast<CMTimeMakeMethod>(
+ dlsym(library_handle, "CMTimeMake"));
+ CHECK(cm_time_make_) << dlerror();
+
+ cm_sample_buffer_get_image_buffer_method_ =
+ reinterpret_cast<CMSampleBufferGetImageBufferMethod>(
+ dlsym(library_handle, "CMSampleBufferGetImageBuffer"));
+ CHECK(cm_sample_buffer_get_image_buffer_method_) << dlerror();
+ }
+
+ const CMTimeMakeMethod& cm_time_make() const { return cm_time_make_; }
+ const CMSampleBufferGetImageBufferMethod&
+ cm_sample_buffer_get_image_buffer_method() const {
+ return cm_sample_buffer_get_image_buffer_method_;
+ }
+
+ private:
+ CMTimeMakeMethod cm_time_make_;
+ CMSampleBufferGetImageBufferMethod cm_sample_buffer_get_image_buffer_method_;
+
+ DISALLOW_COPY_AND_ASSIGN(CoreMediaLibraryInternal);
+};
+
+} // namespace
+
+static base::LazyInstance<CoreMediaLibraryInternal> g_coremedia_handle =
+ LAZY_INSTANCE_INITIALIZER;
+
+CoreMediaGlue::CMTime CoreMediaGlue::CMTimeMake(int64_t value,
+ int32_t timescale) {
+ return g_coremedia_handle.Get().cm_time_make()(value, timescale);
+}
+
+CVImageBufferRef CoreMediaGlue::CMSampleBufferGetImageBuffer(
+ CMSampleBufferRef buffer) {
+ return g_coremedia_handle.Get().cm_sample_buffer_get_image_buffer_method()(
+ buffer);
+}
diff --git a/chromium/media/video/capture/mac/platform_video_capturing_mac.h b/chromium/media/video/capture/mac/platform_video_capturing_mac.h
new file mode 100644
index 00000000000..466ae1bc8fd
--- /dev/null
+++ b/chromium/media/video/capture/mac/platform_video_capturing_mac.h
@@ -0,0 +1,50 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_VIDEO_CAPTURE_MAC_PLATFORM_VIDEO_CAPTURING_MAC_H_
+#define MEDIA_VIDEO_CAPTURE_MAC_PLATFORM_VIDEO_CAPTURING_MAC_H_
+
+#import <Foundation/Foundation.h>
+
+namespace media {
+class VideoCaptureDeviceMac;
+}
+
+// Protocol representing platform-dependent video capture on Mac, implemented
+// by both QTKit and AVFoundation APIs.
+@protocol PlatformVideoCapturingMac <NSObject>
+
+// This method initializes the instance by calling NSObject |init| and registers
+// internally a frame receiver at the same time. The frame receiver is supposed
+// to be initialised before and outlive the VideoCapturingDeviceMac
+// implementation.
+- (id)initWithFrameReceiver:(media::VideoCaptureDeviceMac*)frameReceiver;
+
+// Set the frame receiver. This method executes the registration in mutual
+// exclusion.
+// TODO(mcasas): This method and stopCapture() are always called in sequence and
+// this one is only used to clear the frameReceiver, investigate if both can be
+// merged.
+- (void)setFrameReceiver:(media::VideoCaptureDeviceMac*)frameReceiver;
+
+// Sets which capture device to use by name passed as deviceId argument. The
+// device names are usually obtained via VideoCaptureDevice::GetDeviceNames()
+// method. This method will also configure all device properties except those in
+// setCaptureHeight:width:frameRate. If |deviceId| is nil, all potential
+// configuration is torn down. Returns YES on success, NO otherwise.
+- (BOOL)setCaptureDevice:(NSString*)deviceId;
+
+// Configures the capture properties.
+- (BOOL)setCaptureHeight:(int)height width:(int)width frameRate:(int)frameRate;
+
+// Starts video capturing and registers observers. Returns YES on success, NO
+// otherwise.
+- (BOOL)startCapture;
+
+// Stops video capturing, unregisters observers.
+- (void)stopCapture;
+
+@end
+
+#endif // MEDIA_VIDEO_CAPTURE_MAC_PLATFORM_VIDEO_CAPTURING_MAC_H_
diff --git a/chromium/media/video/capture/mac/video_capture_device_avfoundation_mac.h b/chromium/media/video/capture/mac/video_capture_device_avfoundation_mac.h
new file mode 100644
index 00000000000..0e617e90cda
--- /dev/null
+++ b/chromium/media/video/capture/mac/video_capture_device_avfoundation_mac.h
@@ -0,0 +1,113 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_VIDEO_CAPTURE_MAC_VIDEO_CAPTURE_DEVICE_AVFOUNDATION_MAC_H_
+#define MEDIA_VIDEO_CAPTURE_MAC_VIDEO_CAPTURE_DEVICE_AVFOUNDATION_MAC_H_
+
+#import <Foundation/Foundation.h>
+
+#import "base/mac/scoped_nsobject.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/thread_checker.h"
+#import "media/video/capture/mac/avfoundation_glue.h"
+#import "media/video/capture/mac/platform_video_capturing_mac.h"
+
+namespace media {
+class VideoCaptureDeviceMac;
+}
+
+@class CrAVCaptureDevice;
+@class CrAVCaptureSession;
+@class CrAVCaptureVideoDataOutput;
+
+// Class used by VideoCaptureDeviceMac (VCDM) for video capture using
+// AVFoundation API. This class lives inside the thread created by its owner
+// VCDM.
+//
+// * Clients (VCDM) should call +deviceNames to fetch the list of devices
+// available in the system; this method returns the list of device names that
+// have to be used with -setCaptureDevice:.
+// * Previous to any use, clients (VCDM) must call -initWithFrameReceiver: to
+// initialise an object of this class and register a |frameReceiver_|.
+// * Frame receiver registration or removal can also happen via explicit call
+// to -setFrameReceiver:. Re-registrations are safe and allowed, even during
+// capture using this method.
+// * Method -setCaptureDevice: must be called at least once with a device
+// identifier from +deviceNames. Creates all the necessary AVFoundation
+// objects on first call; it connects them ready for capture every time.
+// This method should not be called during capture (i.e. between
+// -startCapture and -stopCapture).
+// * -setCaptureWidth:height:frameRate: is called if a resolution or frame rate
+// different from the default one set by -setCaptureDevice: is needed.
+// This method should not be called during capture. This method must be
+// called after -setCaptureDevice:.
+// * -startCapture registers the notification listeners and starts the
+// capture. The capture can be stopped using -stopCapture. The capture can be
+// restarted and stopped multiple times, reconfiguring or not the device in
+// between.
+// * -setCaptureDevice can be called with a |nil| value, case in which it stops
+// the capture and disconnects the library objects. This step is not
+// necessary.
+// * Deallocation of the library objects happens gracefully on destruction of
+// the VideoCaptureDeviceAVFoundation object.
+//
+//
+@interface VideoCaptureDeviceAVFoundation
+ : NSObject<CrAVCaptureVideoDataOutputSampleBufferDelegate,
+ PlatformVideoCapturingMac> {
+ @private
+ // The following attributes are set via -setCaptureHeight:width:frameRate:.
+ int frameWidth_;
+ int frameHeight_;
+ int frameRate_;
+
+ base::Lock lock_; // Protects concurrent setting and using of frameReceiver_.
+ media::VideoCaptureDeviceMac* frameReceiver_; // weak.
+
+ base::scoped_nsobject<CrAVCaptureSession> captureSession_;
+
+ // |captureDevice_| is an object coming from AVFoundation, used only to be
+ // plugged in |captureDeviceInput_| and to query for session preset support.
+ CrAVCaptureDevice* captureDevice_;
+ // |captureDeviceInput_| is owned by |captureSession_|.
+ CrAVCaptureDeviceInput* captureDeviceInput_;
+ base::scoped_nsobject<CrAVCaptureVideoDataOutput> captureVideoDataOutput_;
+
+ base::ThreadChecker thread_checker_;
+}
+
+// Returns a dictionary of capture devices with friendly name and unique id.
++ (NSDictionary*)deviceNames;
+
+// Initializes the instance and the underlying capture session and registers the
+// frame receiver.
+- (id)initWithFrameReceiver:(media::VideoCaptureDeviceMac*)frameReceiver;
+
+// Sets the frame receiver.
+- (void)setFrameReceiver:(media::VideoCaptureDeviceMac*)frameReceiver;
+
+// Sets which capture device to use by name, retrieved via |deviceNames|. Once
+// the deviceId is known, the library objects are created if needed and
+// connected for the capture, and a default resolution is set. If deviceId is
+// nil, then the eventual capture is stopped and library objects are
+// disconnected. Returns YES on success, NO otherwise. This method should not be
+// called during capture.
+- (BOOL)setCaptureDevice:(NSString*)deviceId;
+
+// Configures the capture properties for the capture session and the video data
+// output; this means it MUST be called after setCaptureDevice:. Returns YES on
+// success, else NO.
+- (BOOL)setCaptureHeight:(int)height width:(int)width frameRate:(int)frameRate;
+
+// Starts video capturing and registers the notification listeners. Must be
+// called after setCaptureDevice:, and, eventually, also after
+// setCaptureHeight:width:frameRate:. Returns YES on success, NO otherwise.
+- (BOOL)startCapture;
+
+// Stops video capturing and stops listening to notifications.
+- (void)stopCapture;
+
+@end
+
+#endif // MEDIA_VIDEO_CAPTURE_MAC_VIDEO_CAPTURE_DEVICE_AVFOUNDATION_MAC_H_
diff --git a/chromium/media/video/capture/mac/video_capture_device_avfoundation_mac.mm b/chromium/media/video/capture/mac/video_capture_device_avfoundation_mac.mm
new file mode 100644
index 00000000000..a6bf920a2c2
--- /dev/null
+++ b/chromium/media/video/capture/mac/video_capture_device_avfoundation_mac.mm
@@ -0,0 +1,246 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import "media/video/capture/mac/video_capture_device_avfoundation_mac.h"
+
+#import <CoreVideo/CoreVideo.h>
+
+#include "base/logging.h"
+#include "base/mac/foundation_util.h"
+#include "media/video/capture/mac/video_capture_device_mac.h"
+#include "ui/gfx/size.h"
+
+@implementation VideoCaptureDeviceAVFoundation
+
+#pragma mark Class methods
+
++ (void)getDeviceNames:(NSMutableDictionary*)deviceNames {
+ // At this stage we already know that AVFoundation is supported and the whole
+ // library is loaded and initialised, by the device monitoring.
+ NSArray* devices = [AVCaptureDeviceGlue devices];
+ for (CrAVCaptureDevice* device in devices) {
+ if ([device hasMediaType:AVFoundationGlue::AVMediaTypeVideo()] ||
+ [device hasMediaType:AVFoundationGlue::AVMediaTypeMuxed()]) {
+ [deviceNames setObject:[device localizedName]
+ forKey:[device uniqueID]];
+ }
+ }
+}
+
++ (NSDictionary*)deviceNames {
+ NSMutableDictionary* deviceNames =
+ [[[NSMutableDictionary alloc] init] autorelease];
+ // The device name retrieval is not going to happen in the main thread, and
+ // this might cause instabilities (it did in QTKit), so keep an eye here.
+ [self getDeviceNames:deviceNames];
+ return deviceNames;
+}
+
+#pragma mark Public methods
+
+- (id)initWithFrameReceiver:(media::VideoCaptureDeviceMac*)frameReceiver {
+ if ((self = [super init])) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(frameReceiver);
+ [self setFrameReceiver:frameReceiver];
+ captureSession_.reset(
+ [[AVFoundationGlue::AVCaptureSessionClass() alloc] init]);
+ }
+ return self;
+}
+
+- (void)dealloc {
+ [self stopCapture];
+ [super dealloc];
+}
+
+- (void)setFrameReceiver:(media::VideoCaptureDeviceMac*)frameReceiver {
+ base::AutoLock lock(lock_);
+ frameReceiver_ = frameReceiver;
+}
+
+- (BOOL)setCaptureDevice:(NSString*)deviceId {
+ DCHECK(captureSession_);
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ if (!deviceId) {
+ // First stop the capture session, if it's running.
+ [self stopCapture];
+ // Now remove the input and output from the capture session.
+ [captureSession_ removeOutput:captureVideoDataOutput_];
+ if (captureDeviceInput_) {
+ [captureSession_ removeInput:captureDeviceInput_];
+ // No need to release |captureDeviceInput_|, is owned by the session.
+ captureDeviceInput_ = nil;
+ }
+ return YES;
+ }
+
+ // Look for input device with requested name.
+ captureDevice_ = [AVCaptureDeviceGlue deviceWithUniqueID:deviceId];
+ if (!captureDevice_) {
+ DLOG(ERROR) << "Could not open video capture device.";
+ return NO;
+ }
+
+ // Create the capture input associated with the device. Easy peasy.
+ NSError* error = nil;
+ captureDeviceInput_ = [AVCaptureDeviceInputGlue
+ deviceInputWithDevice:captureDevice_
+ error:&error];
+ if (!captureDeviceInput_) {
+ captureDevice_ = nil;
+ DLOG(ERROR) << "Could not create video capture input: "
+ << [[error localizedDescription] UTF8String];
+ return NO;
+ }
+ [captureSession_ addInput:captureDeviceInput_];
+
+ // Create a new data output for video. The data output is configured to
+ // discard late frames by default.
+ captureVideoDataOutput_.reset(
+ [[AVFoundationGlue::AVCaptureVideoDataOutputClass() alloc] init]);
+ if (!captureVideoDataOutput_) {
+ [captureSession_ removeInput:captureDeviceInput_];
+ DLOG(ERROR) << "Could not create video data output.";
+ return NO;
+ }
+ [captureVideoDataOutput_
+ setSampleBufferDelegate:self
+ queue:dispatch_get_global_queue(
+ DISPATCH_QUEUE_PRIORITY_DEFAULT, 0)];
+ [captureSession_ addOutput:captureVideoDataOutput_];
+ return YES;
+}
+
+- (BOOL)setCaptureHeight:(int)height width:(int)width frameRate:(int)frameRate {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ frameWidth_ = width;
+ frameHeight_ = height;
+ frameRate_ = frameRate;
+
+ // Identify the sessionPreset that corresponds to the desired resolution.
+ NSString* sessionPreset;
+ if (width == 1280 && height == 720 && [captureSession_ canSetSessionPreset:
+ AVFoundationGlue::AVCaptureSessionPreset1280x720()]) {
+ sessionPreset = AVFoundationGlue::AVCaptureSessionPreset1280x720();
+ } else if (width == 640 && height == 480 && [captureSession_
+ canSetSessionPreset:
+ AVFoundationGlue::AVCaptureSessionPreset640x480()]) {
+ sessionPreset = AVFoundationGlue::AVCaptureSessionPreset640x480();
+ } else if (width == 320 && height == 240 && [captureSession_
+ canSetSessionPreset:
+ AVFoundationGlue::AVCaptureSessionPreset320x240()]) {
+ sessionPreset = AVFoundationGlue::AVCaptureSessionPreset320x240();
+ } else {
+ DLOG(ERROR) << "Unsupported resolution (" << width << "x" << height << ")";
+ return NO;
+ }
+ [captureSession_ setSessionPreset:sessionPreset];
+
+ // Check that our capture Device can be used with the current preset.
+ if (![captureDevice_ supportsAVCaptureSessionPreset:
+ [captureSession_ sessionPreset]]){
+ DLOG(ERROR) << "Video capture device does not support current preset";
+ return NO;
+ }
+
+ // Despite all Mac documentation detailing that setting the sessionPreset is
+ // enough, that is not the case for, at least, the MacBook Air built-in
+ // FaceTime HD Camera, and the capture output has to be configured as well.
+ // The reason for this mismatch is probably because most of the AVFoundation
+// docs are written for iOS and not for Mac OS X.
+ // AVVideoScalingModeKey() refers to letterboxing yes/no and preserve aspect
+ // ratio yes/no when scaling. Currently we set letterbox and preservation.
+ NSDictionary* videoSettingsDictionary = @{
+ (id)kCVPixelBufferWidthKey : @(width),
+ (id)kCVPixelBufferHeightKey : @(height),
+ (id)kCVPixelBufferPixelFormatTypeKey : @(kCVPixelFormatType_422YpCbCr8),
+ AVFoundationGlue::AVVideoScalingModeKey() :
+ AVFoundationGlue::AVVideoScalingModeResizeAspect()
+ };
+ [captureVideoDataOutput_ setVideoSettings:videoSettingsDictionary];
+
+ CrAVCaptureConnection* captureConnection = [captureVideoDataOutput_
+ connectionWithMediaType:AVFoundationGlue::AVMediaTypeVideo()];
+ // TODO(mcasas): Check selector existence, related to bugs
+ // http://crbug.com/327532 and http://crbug.com/328096.
+ if ([captureConnection
+ respondsToSelector:@selector(isVideoMinFrameDurationSupported)] &&
+ [captureConnection isVideoMinFrameDurationSupported]) {
+ [captureConnection setVideoMinFrameDuration:
+ CoreMediaGlue::CMTimeMake(1, frameRate)];
+ }
+ if ([captureConnection
+ respondsToSelector:@selector(isVideoMaxFrameDurationSupported)] &&
+ [captureConnection isVideoMaxFrameDurationSupported]) {
+ [captureConnection setVideoMaxFrameDuration:
+ CoreMediaGlue::CMTimeMake(1, frameRate)];
+ }
+ return YES;
+}
+
+- (BOOL)startCapture {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if (!captureSession_) {
+ DLOG(ERROR) << "Video capture session not initialized.";
+ return NO;
+ }
+ // Connect the notifications.
+ NSNotificationCenter* nc = [NSNotificationCenter defaultCenter];
+ [nc addObserver:self
+ selector:@selector(onVideoError:)
+ name:AVFoundationGlue::AVCaptureSessionRuntimeErrorNotification()
+ object:captureSession_];
+ [captureSession_ startRunning];
+ return YES;
+}
+
+- (void)stopCapture {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if ([captureSession_ isRunning])
+ [captureSession_ stopRunning]; // Synchronous.
+ [[NSNotificationCenter defaultCenter] removeObserver:self];
+}
+
+#pragma mark Private methods
+
+// |captureOutput| is called by the capture device to deliver a new frame.
+- (void)captureOutput:(CrAVCaptureOutput*)captureOutput
+ didOutputSampleBuffer:(CoreMediaGlue::CMSampleBufferRef)sampleBuffer
+ fromConnection:(CrAVCaptureConnection*)connection {
+ CVImageBufferRef videoFrame =
+ CoreMediaGlue::CMSampleBufferGetImageBuffer(sampleBuffer);
+ // Lock the frame and calculate frame size.
+ const int kLockFlags = 0;
+ if (CVPixelBufferLockBaseAddress(videoFrame, kLockFlags) ==
+ kCVReturnSuccess) {
+ void* baseAddress = CVPixelBufferGetBaseAddress(videoFrame);
+ size_t bytesPerRow = CVPixelBufferGetBytesPerRow(videoFrame);
+ size_t frameWidth = CVPixelBufferGetWidth(videoFrame);
+ size_t frameHeight = CVPixelBufferGetHeight(videoFrame);
+ size_t frameSize = bytesPerRow * frameHeight;
+ UInt8* addressToPass = reinterpret_cast<UInt8*>(baseAddress);
+
+ media::VideoCaptureFormat captureFormat(
+ gfx::Size(frameWidth, frameHeight),
+ frameRate_,
+ media::PIXEL_FORMAT_UYVY);
+ base::AutoLock lock(lock_);
+ if (!frameReceiver_)
+ return;
+ frameReceiver_->ReceiveFrame(addressToPass, frameSize, captureFormat, 0, 0);
+ CVPixelBufferUnlockBaseAddress(videoFrame, kLockFlags);
+ }
+}
+
+- (void)onVideoError:(NSNotification*)errorNotification {
+ NSError* error = base::mac::ObjCCast<NSError>([[errorNotification userInfo]
+ objectForKey:AVFoundationGlue::AVCaptureSessionErrorKey()]);
+ base::AutoLock lock(lock_);
+ if (frameReceiver_)
+ frameReceiver_->ReceiveError([[error localizedDescription] UTF8String]);
+}
+
+@end
diff --git a/chromium/media/video/capture/mac/video_capture_device_mac.h b/chromium/media/video/capture/mac/video_capture_device_mac.h
index e600459e2c9..474e7e1bf45 100644
--- a/chromium/media/video/capture/mac/video_capture_device_mac.h
+++ b/chromium/media/video/capture/mac/video_capture_device_mac.h
@@ -2,7 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// OS X implementation of VideoCaptureDevice, using QTKit as native capture API.
+// MacOSX implementation of generic VideoCaptureDevice, using either QTKit or
+// AVFoundation as native capture API. QTKit is used in OSX versions 10.6 and
+// previous, and AVFoundation is used in the rest.
#ifndef MEDIA_VIDEO_CAPTURE_MAC_VIDEO_CAPTURE_DEVICE_MAC_H_
#define MEDIA_VIDEO_CAPTURE_MAC_VIDEO_CAPTURE_DEVICE_MAC_H_
@@ -16,31 +18,29 @@
#include "media/video/capture/video_capture_device.h"
#include "media/video/capture/video_capture_types.h"
-@class VideoCaptureDeviceQTKit;
+@protocol PlatformVideoCapturingMac;
namespace media {
// Called by VideoCaptureManager to open, close and start, stop video capture
// devices.
-class VideoCaptureDeviceMac : public VideoCaptureDevice1 {
+class VideoCaptureDeviceMac : public VideoCaptureDevice {
public:
explicit VideoCaptureDeviceMac(const Name& device_name);
virtual ~VideoCaptureDeviceMac();
// VideoCaptureDevice implementation.
- virtual void Allocate(const VideoCaptureCapability& capture_format,
- VideoCaptureDevice::EventHandler* observer) OVERRIDE;
- virtual void Start() OVERRIDE;
- virtual void Stop() OVERRIDE;
- virtual void DeAllocate() OVERRIDE;
- virtual const Name& device_name() OVERRIDE;
+ virtual void AllocateAndStart(const VideoCaptureParams& params,
+ scoped_ptr<VideoCaptureDevice::Client> client)
+ OVERRIDE;
+ virtual void StopAndDeAllocate() OVERRIDE;
bool Init();
// Called to deliver captured video frames.
void ReceiveFrame(const uint8* video_frame,
int video_frame_length,
- const VideoCaptureCapability& frame_info,
+ const VideoCaptureFormat& frame_format,
int aspect_numerator,
int aspect_denominator);
@@ -54,16 +54,16 @@ class VideoCaptureDeviceMac : public VideoCaptureDevice1 {
enum InternalState {
kNotInitialized,
kIdle,
- kAllocated,
kCapturing,
kError
};
Name device_name_;
- VideoCaptureDevice::EventHandler* observer_;
+ scoped_ptr<VideoCaptureDevice::Client> client_;
- VideoCaptureCapability current_settings_;
+ VideoCaptureFormat capture_format_;
bool sent_frame_info_;
+ bool tried_to_square_pixels_;
// Only read and write state_ from inside this loop.
const scoped_refptr<base::MessageLoopProxy> loop_proxy_;
@@ -74,7 +74,7 @@ class VideoCaptureDeviceMac : public VideoCaptureDevice1 {
base::WeakPtrFactory<VideoCaptureDeviceMac> weak_factory_;
base::WeakPtr<VideoCaptureDeviceMac> weak_this_;
- VideoCaptureDeviceQTKit* capture_device_;
+ id<PlatformVideoCapturingMac> capture_device_;
DISALLOW_COPY_AND_ASSIGN(VideoCaptureDeviceMac);
};
diff --git a/chromium/media/video/capture/mac/video_capture_device_mac.mm b/chromium/media/video/capture/mac/video_capture_device_mac.mm
index eea861481fe..dba4fa1c6fb 100644
--- a/chromium/media/video/capture/mac/video_capture_device_mac.mm
+++ b/chromium/media/video/capture/mac/video_capture_device_mac.mm
@@ -4,15 +4,16 @@
#include "media/video/capture/mac/video_capture_device_mac.h"
-#import <QTKit/QTKit.h>
-
#include "base/bind.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/time/time.h"
-#include "media/video/capture/mac/video_capture_device_qtkit_mac.h"
+#import "media/video/capture/mac/avfoundation_glue.h"
+#import "media/video/capture/mac/platform_video_capturing_mac.h"
+#import "media/video/capture/mac/video_capture_device_avfoundation_mac.h"
+#import "media/video/capture/mac/video_capture_device_qtkit_mac.h"
-namespace {
+namespace media {
const int kMinFrameRate = 1;
const int kMaxFrameRate = 30;
@@ -35,6 +36,12 @@ const Resolution* const kWellSupportedResolutions[] = {
&kHD,
};
+// Rescaling the image to fix the pixel aspect ratio runs the risk of making
+// the aspect ratio worse, if QTKit selects a new source mode with a different
+// shape. This constant ensures that we don't take this risk if the current
+// aspect ratio is tolerable.
+const float kMaxPixelAspectRatio = 1.15;
+
// TODO(ronghuawu): Replace this with CapabilityList::GetBestMatchedCapability.
void GetBestMatchSupportedResolution(int* width, int* height) {
int min_diff = kint32max;
@@ -55,15 +62,18 @@ void GetBestMatchSupportedResolution(int* width, int* height) {
*height = matched_height;
}
-}
-
-namespace media {
-
void VideoCaptureDevice::GetDeviceNames(Names* device_names) {
// Loop through all available devices and add to |device_names|.
device_names->clear();
- NSDictionary* capture_devices = [VideoCaptureDeviceQTKit deviceNames];
+ NSDictionary* capture_devices;
+ if (AVFoundationGlue::IsAVFoundationSupported()) {
+ DVLOG(1) << "Enumerating video capture devices using AVFoundation";
+ capture_devices = [VideoCaptureDeviceAVFoundation deviceNames];
+ } else {
+ DVLOG(1) << "Enumerating video capture devices using QTKit";
+ capture_devices = [VideoCaptureDeviceQTKit deviceNames];
+ }
for (NSString* key in capture_devices) {
Name name([[capture_devices valueForKey:key] UTF8String],
[key UTF8String]);
@@ -71,6 +81,12 @@ void VideoCaptureDevice::GetDeviceNames(Names* device_names) {
}
}
+// static
+void VideoCaptureDevice::GetDeviceSupportedFormats(const Name& device,
+ VideoCaptureFormats* formats) {
+ NOTIMPLEMENTED();
+}
+
const std::string VideoCaptureDevice::Name::GetModel() const {
// Both PID and VID are 4 characters.
if (unique_id_.size() < 2 * kVidPidSize) {
@@ -99,8 +115,8 @@ VideoCaptureDevice* VideoCaptureDevice::Create(const Name& device_name) {
VideoCaptureDeviceMac::VideoCaptureDeviceMac(const Name& device_name)
: device_name_(device_name),
- observer_(NULL),
sent_frame_info_(false),
+ tried_to_square_pixels_(false),
loop_proxy_(base::MessageLoopProxy::current()),
state_(kNotInitialized),
weak_factory_(this),
@@ -113,24 +129,23 @@ VideoCaptureDeviceMac::~VideoCaptureDeviceMac() {
[capture_device_ release];
}
-void VideoCaptureDeviceMac::Allocate(
- const VideoCaptureCapability& capture_format,
- EventHandler* observer) {
+void VideoCaptureDeviceMac::AllocateAndStart(
+ const VideoCaptureParams& params,
+ scoped_ptr<VideoCaptureDevice::Client> client) {
DCHECK_EQ(loop_proxy_, base::MessageLoopProxy::current());
if (state_ != kIdle) {
return;
}
- int width = capture_format.width;
- int height = capture_format.height;
- int frame_rate = capture_format.frame_rate;
+ int width = params.requested_format.frame_size.width();
+ int height = params.requested_format.frame_size.height();
+ int frame_rate = params.requested_format.frame_rate;
- // QTKit can scale captured frame to any size requested, which would lead to
- // undesired aspect ratio change. Tries to open the camera with a natively
- // supported format and let the client to crop/pad the captured frames.
- GetBestMatchSupportedResolution(&width,
- &height);
+ // The OS API can scale captured frame to any size requested, which would lead
+ // to undesired aspect ratio change. Try to open the camera with a natively
+ // supported format and let the client crop/pad the captured frames.
+ GetBestMatchSupportedResolution(&width, &height);
- observer_ = observer;
+ client_ = client.Pass();
NSString* deviceId =
[NSString stringWithUTF8String:device_name_.id().c_str()];
@@ -145,23 +160,18 @@ void VideoCaptureDeviceMac::Allocate(
else if (frame_rate > kMaxFrameRate)
frame_rate = kMaxFrameRate;
- current_settings_.color = PIXEL_FORMAT_UYVY;
- current_settings_.width = width;
- current_settings_.height = height;
- current_settings_.frame_rate = frame_rate;
- current_settings_.expected_capture_delay = 0;
- current_settings_.interlaced = false;
+ capture_format_.frame_size.SetSize(width, height);
+ capture_format_.frame_rate = frame_rate;
+ capture_format_.pixel_format = PIXEL_FORMAT_UYVY;
- if (width != kHD.width || height != kHD.height) {
+ if (width <= kVGA.width || height <= kVGA.height) {
// If the resolution is VGA or QVGA, set the capture resolution to the
- // target size. For most cameras (though not all), at these resolutions
- // QTKit produces frames with square pixels.
+ // target size. Essentially all supported cameras offer at least VGA.
if (!UpdateCaptureResolution())
return;
-
- sent_frame_info_ = true;
- observer_->OnFrameInfo(current_settings_);
}
+ // For higher resolutions, we first open at the default resolution to find
+ // out if the request is larger than the camera's native resolution.
// If the resolution is HD, start capturing without setting a resolution.
// QTKit will produce frames at the native resolution, allowing us to
@@ -174,56 +184,46 @@ void VideoCaptureDeviceMac::Allocate(
return;
}
- state_ = kAllocated;
-}
-
-void VideoCaptureDeviceMac::Start() {
- DCHECK_EQ(loop_proxy_, base::MessageLoopProxy::current());
- DCHECK_EQ(state_, kAllocated);
state_ = kCapturing;
-
- // This method no longer has any effect. Capturing is triggered by
- // the call to Allocate.
- // TODO(bemasc, ncarter): Remove this method.
}
-void VideoCaptureDeviceMac::Stop() {
+void VideoCaptureDeviceMac::StopAndDeAllocate() {
DCHECK_EQ(loop_proxy_, base::MessageLoopProxy::current());
DCHECK(state_ == kCapturing || state_ == kError) << state_;
[capture_device_ stopCapture];
- state_ = kAllocated;
-}
-void VideoCaptureDeviceMac::DeAllocate() {
- DCHECK_EQ(loop_proxy_, base::MessageLoopProxy::current());
- if (state_ != kAllocated && state_ != kCapturing) {
- return;
- }
- if (state_ == kCapturing) {
- [capture_device_ stopCapture];
- }
[capture_device_ setCaptureDevice:nil];
[capture_device_ setFrameReceiver:nil];
-
+ client_.reset();
state_ = kIdle;
-}
-
-const VideoCaptureDevice::Name& VideoCaptureDeviceMac::device_name() {
- return device_name_;
+ tried_to_square_pixels_ = false;
}
bool VideoCaptureDeviceMac::Init() {
DCHECK_EQ(loop_proxy_, base::MessageLoopProxy::current());
DCHECK_EQ(state_, kNotInitialized);
+ // TODO(mcasas): The following check might not be necessary; if the device has
+ // disappeared after enumeration and before coming here, opening would just
+ // fail but not necessarily produce a crash.
Names device_names;
GetDeviceNames(&device_names);
- Name* found = device_names.FindById(device_name_.id());
- if (!found)
+ Names::iterator it = device_names.begin();
+ for (; it != device_names.end(); ++it) {
+ if (it->id() == device_name_.id())
+ break;
+ }
+ if (it == device_names.end())
return false;
- capture_device_ =
- [[VideoCaptureDeviceQTKit alloc] initWithFrameReceiver:this];
+ if (AVFoundationGlue::IsAVFoundationSupported()) {
+ capture_device_ =
+ [[VideoCaptureDeviceAVFoundation alloc] initWithFrameReceiver:this];
+ } else {
+ capture_device_ =
+ [[VideoCaptureDeviceQTKit alloc] initWithFrameReceiver:this];
+ }
+
if (!capture_device_)
return false;
@@ -234,54 +234,81 @@ bool VideoCaptureDeviceMac::Init() {
void VideoCaptureDeviceMac::ReceiveFrame(
const uint8* video_frame,
int video_frame_length,
- const VideoCaptureCapability& frame_info,
+ const VideoCaptureFormat& frame_format,
int aspect_numerator,
int aspect_denominator) {
// This method is safe to call from a device capture thread,
// i.e. any thread controlled by QTKit.
if (!sent_frame_info_) {
- if (current_settings_.width == kHD.width &&
- current_settings_.height == kHD.height) {
- bool changeToVga = false;
- if (frame_info.width < kHD.width || frame_info.height < kHD.height) {
+ // Final resolution has not yet been selected.
+ if (capture_format_.frame_size.width() > kVGA.width ||
+ capture_format_.frame_size.height() > kVGA.height) {
+ // We are requesting HD. Make sure that the picture is good, otherwise
+ // drop down to VGA.
+ bool change_to_vga = false;
+ if (frame_format.frame_size.width() <
+ capture_format_.frame_size.width() ||
+ frame_format.frame_size.height() <
+ capture_format_.frame_size.height()) {
// These are the default capture settings, not yet configured to match
- // |current_settings_|.
- DCHECK(frame_info.frame_rate == 0);
+ // |capture_format_|.
+ DCHECK(frame_format.frame_rate == 0);
DVLOG(1) << "Switching to VGA because the default resolution is " <<
- frame_info.width << "x" << frame_info.height;
- changeToVga = true;
+ frame_format.frame_size.ToString();
+ change_to_vga = true;
}
- if (frame_info.width == kHD.width && frame_info.height == kHD.height &&
+
+ if (capture_format_.frame_size == frame_format.frame_size &&
aspect_numerator != aspect_denominator) {
DVLOG(1) << "Switching to VGA because HD has nonsquare pixel " <<
"aspect ratio " << aspect_numerator << ":" << aspect_denominator;
- changeToVga = true;
+ change_to_vga = true;
}
- if (changeToVga) {
- current_settings_.width = kVGA.width;
- current_settings_.height = kVGA.height;
+ if (change_to_vga) {
+ capture_format_.frame_size.SetSize(kVGA.width, kVGA.height);
+ }
+ }
+
+ if (capture_format_.frame_size == frame_format.frame_size &&
+ !tried_to_square_pixels_ &&
+ (aspect_numerator > kMaxPixelAspectRatio * aspect_denominator ||
+ aspect_denominator > kMaxPixelAspectRatio * aspect_numerator)) {
+ // The requested size results in non-square PAR.
+ // Shrink the frame to 1:1 PAR (assuming QTKit selects the same input
+ // mode, which is not guaranteed).
+ int new_width = capture_format_.frame_size.width();
+ int new_height = capture_format_.frame_size.height();
+ if (aspect_numerator < aspect_denominator) {
+ new_width = (new_width * aspect_numerator) / aspect_denominator;
+ } else {
+ new_height = (new_height * aspect_denominator) / aspect_numerator;
}
+ capture_format_.frame_size.SetSize(new_width, new_height);
+ tried_to_square_pixels_ = true;
}
- if (current_settings_.width == frame_info.width &&
- current_settings_.height == frame_info.height) {
+ if (capture_format_.frame_size == frame_format.frame_size) {
sent_frame_info_ = true;
- observer_->OnFrameInfo(current_settings_);
} else {
UpdateCaptureResolution();
- // The current frame does not have the right width and height, so it
- // must not be passed to |observer_|.
+ // OnFrameInfo has not yet been called. OnIncomingCapturedFrame must
+ // not be called until after OnFrameInfo, so we return early.
return;
}
}
- DCHECK(current_settings_.width == frame_info.width &&
- current_settings_.height == frame_info.height);
+ DCHECK_EQ(capture_format_.frame_size.width(),
+ frame_format.frame_size.width());
+ DCHECK_EQ(capture_format_.frame_size.height(),
+ frame_format.frame_size.height());
- observer_->OnIncomingCapturedFrame(
- video_frame, video_frame_length, base::Time::Now(), 0, false, false);
+ client_->OnIncomingCapturedFrame(video_frame,
+ video_frame_length,
+ base::Time::Now(),
+ 0,
+ capture_format_);
}
void VideoCaptureDeviceMac::ReceiveError(const std::string& reason) {
@@ -294,13 +321,13 @@ void VideoCaptureDeviceMac::SetErrorState(const std::string& reason) {
DCHECK_EQ(loop_proxy_, base::MessageLoopProxy::current());
DLOG(ERROR) << reason;
state_ = kError;
- observer_->OnError();
+ client_->OnError();
}
bool VideoCaptureDeviceMac::UpdateCaptureResolution() {
- if (![capture_device_ setCaptureHeight:current_settings_.height
- width:current_settings_.width
- frameRate:current_settings_.frame_rate]) {
+ if (![capture_device_ setCaptureHeight:capture_format_.frame_size.height()
+ width:capture_format_.frame_size.width()
+ frameRate:capture_format_.frame_rate]) {
ReceiveError("Could not configure capture device.");
return false;
}
diff --git a/chromium/media/video/capture/mac/video_capture_device_qtkit_mac.h b/chromium/media/video/capture/mac/video_capture_device_qtkit_mac.h
index d032ef0481f..1eba8a12ea2 100644
--- a/chromium/media/video/capture/mac/video_capture_device_qtkit_mac.h
+++ b/chromium/media/video/capture/mac/video_capture_device_qtkit_mac.h
@@ -5,21 +5,23 @@
// VideoCaptureDeviceQTKit implements all QTKit related code for
// communicating with a QTKit capture device.
-#ifndef MEDIA_VIDEO_CAPTURE_MAC_VIDEO_CAPTURE_DEVICE_MAC_QTKIT_H_
-#define MEDIA_VIDEO_CAPTURE_MAC_VIDEO_CAPTURE_DEVICE_MAC_QTKIT_H_
+#ifndef MEDIA_VIDEO_CAPTURE_MAC_VIDEO_CAPTURE_DEVICE_QTKIT_MAC_H_
+#define MEDIA_VIDEO_CAPTURE_MAC_VIDEO_CAPTURE_DEVICE_QTKIT_MAC_H_
#import <Foundation/Foundation.h>
#include <vector>
+#import "media/video/capture/mac/platform_video_capturing_mac.h"
+
namespace media {
- class VideoCaptureDeviceMac;
+class VideoCaptureDeviceMac;
}
@class QTCaptureDeviceInput;
@class QTCaptureSession;
-@interface VideoCaptureDeviceQTKit : NSObject {
+@interface VideoCaptureDeviceQTKit : NSObject<PlatformVideoCapturingMac> {
@private
// Settings.
int frameRate_;
@@ -38,16 +40,16 @@ namespace media {
}
// Returns a dictionary of capture devices with friendly name and unique id.
-+ (NSDictionary *)deviceNames;
++ (NSDictionary*)deviceNames;
// Initializes the instance and registers the frame receiver.
-- (id)initWithFrameReceiver:(media::VideoCaptureDeviceMac *)frameReceiver;
+- (id)initWithFrameReceiver:(media::VideoCaptureDeviceMac*)frameReceiver;
// Set the frame receiver.
-- (void)setFrameReceiver:(media::VideoCaptureDeviceMac *)frameReceiver;
+- (void)setFrameReceiver:(media::VideoCaptureDeviceMac*)frameReceiver;
// Sets which capture device to use. Returns YES on sucess, NO otherwise.
-- (BOOL)setCaptureDevice:(NSString *)deviceId;
+- (BOOL)setCaptureDevice:(NSString*)deviceId;
// Configures the capture properties.
- (BOOL)setCaptureHeight:(int)height width:(int)width frameRate:(int)frameRate;
@@ -59,8 +61,8 @@ namespace media {
- (void)stopCapture;
// Handle any QTCaptureSessionRuntimeErrorNotifications.
-- (void)handleNotification:(NSNotification *)errorNotification;
+- (void)handleNotification:(NSNotification*)errorNotification;
@end
-#endif // MEDIA_VIDEO_CAPTURE_MAC_VIDEO_CAPTURE_DEVICE_MAC_QTKIT_H_
+#endif // MEDIA_VIDEO_CAPTURE_MAC_VIDEO_CAPTURE_DEVICE_QTKIT_MAC_H_
diff --git a/chromium/media/video/capture/mac/video_capture_device_qtkit_mac.mm b/chromium/media/video/capture/mac/video_capture_device_qtkit_mac.mm
index 2b7e28e4e70..cd9c6d333e9 100644
--- a/chromium/media/video/capture/mac/video_capture_device_qtkit_mac.mm
+++ b/chromium/media/video/capture/mac/video_capture_device_qtkit_mac.mm
@@ -12,6 +12,7 @@
#include "media/video/capture/mac/video_capture_device_mac.h"
#include "media/video/capture/video_capture_device.h"
#include "media/video/capture/video_capture_types.h"
+#include "ui/gfx/size.h"
@implementation VideoCaptureDeviceQTKit
@@ -28,8 +29,9 @@
});
for (QTCaptureDevice* device in captureDevices) {
- [deviceNames setObject:[device localizedDisplayName]
- forKey:[device uniqueID]];
+ if (![[device attributeForKey:QTCaptureDeviceSuspendedAttribute] boolValue])
+ [deviceNames setObject:[device localizedDisplayName]
+ forKey:[device uniqueID]];
}
}
@@ -47,7 +49,7 @@
#pragma mark Public methods
-- (id)initWithFrameReceiver:(media::VideoCaptureDeviceMac *)frameReceiver {
+- (id)initWithFrameReceiver:(media::VideoCaptureDeviceMac*)frameReceiver {
self = [super init];
if (self) {
frameReceiver_ = frameReceiver;
@@ -62,13 +64,13 @@
[super dealloc];
}
-- (void)setFrameReceiver:(media::VideoCaptureDeviceMac *)frameReceiver {
+- (void)setFrameReceiver:(media::VideoCaptureDeviceMac*)frameReceiver {
[lock_ lock];
frameReceiver_ = frameReceiver;
[lock_ unlock];
}
-- (BOOL)setCaptureDevice:(NSString *)deviceId {
+- (BOOL)setCaptureDevice:(NSString*)deviceId {
if (deviceId) {
// Set the capture device.
if (captureDeviceInput_) {
@@ -86,6 +88,11 @@
return NO;
}
QTCaptureDevice *device = [captureDevices objectAtIndex:index];
+ if ([[device attributeForKey:QTCaptureDeviceSuspendedAttribute]
+ boolValue]) {
+ DLOG(ERROR) << "Cannot open suspended video capture device.";
+ return NO;
+ }
NSError *error;
if (![device open:&error]) {
DLOG(ERROR) << "Could not open video capture device."
@@ -172,22 +179,15 @@
QTCaptureDecompressedVideoOutput *output =
[[captureSession_ outputs] objectAtIndex:0];
- // The old capture dictionary is used to retrieve the initial pixel
- // format, which must be maintained.
- NSDictionary *oldCaptureDictionary = [output pixelBufferAttributes];
-
- // Set up desired output properties.
- NSDictionary *captureDictionary =
- [NSDictionary dictionaryWithObjectsAndKeys:
- [NSNumber numberWithDouble:width],
- (id)kCVPixelBufferWidthKey,
- [NSNumber numberWithDouble:height],
- (id)kCVPixelBufferHeightKey,
- [oldCaptureDictionary
- valueForKey:(id)kCVPixelBufferPixelFormatTypeKey],
- (id)kCVPixelBufferPixelFormatTypeKey,
- nil];
- [output setPixelBufferAttributes:captureDictionary];
+ // Set up desired output properties. The old capture dictionary is used to
+ // retrieve the initial pixel format, which must be maintained.
+ NSDictionary* videoSettingsDictionary = @{
+ (id)kCVPixelBufferWidthKey : @(width),
+ (id)kCVPixelBufferHeightKey : @(height),
+ (id)kCVPixelBufferPixelFormatTypeKey : [[output pixelBufferAttributes]
+ valueForKey:(id)kCVPixelBufferPixelFormatTypeKey]
+ };
+ [output setPixelBufferAttributes:videoSettingsDictionary];
[output setMinimumVideoFrameInterval:(NSTimeInterval)1/(float)frameRate];
return YES;
@@ -227,10 +227,10 @@
}
// |captureOutput| is called by the capture device to deliver a new frame.
-- (void)captureOutput:(QTCaptureOutput *)captureOutput
+- (void)captureOutput:(QTCaptureOutput*)captureOutput
didOutputVideoFrame:(CVImageBufferRef)videoFrame
- withSampleBuffer:(QTSampleBuffer *)sampleBuffer
- fromConnection:(QTCaptureConnection *)connection {
+ withSampleBuffer:(QTSampleBuffer*)sampleBuffer
+ fromConnection:(QTCaptureConnection*)connection {
[lock_ lock];
if(!frameReceiver_) {
[lock_ unlock];
@@ -275,13 +275,10 @@
addressToPass = adjustedAddress;
frameSize = frameHeight * expectedBytesPerRow;
}
- media::VideoCaptureCapability captureCapability;
- captureCapability.width = frameWidth;
- captureCapability.height = frameHeight;
- captureCapability.frame_rate = frameRate_;
- captureCapability.color = media::PIXEL_FORMAT_UYVY;
- captureCapability.expected_capture_delay = 0;
- captureCapability.interlaced = false;
+
+ media::VideoCaptureFormat captureFormat(gfx::Size(frameWidth, frameHeight),
+ frameRate_,
+ media::PIXEL_FORMAT_UYVY);
// The aspect ratio dictionary is often missing, in which case we report
// a pixel aspect ratio of 0:0.
@@ -301,7 +298,7 @@
}
// Deliver the captured video frame.
- frameReceiver_->ReceiveFrame(addressToPass, frameSize, captureCapability,
+ frameReceiver_->ReceiveFrame(addressToPass, frameSize, captureFormat,
aspectNumerator, aspectDenominator);
CVPixelBufferUnlockBaseAddress(videoFrame, kLockFlags);
@@ -309,8 +306,8 @@
[lock_ unlock];
}
-- (void)handleNotification:(NSNotification *)errorNotification {
- NSError * error = (NSError *)[[errorNotification userInfo]
+- (void)handleNotification:(NSNotification*)errorNotification {
+ NSError * error = (NSError*)[[errorNotification userInfo]
objectForKey:QTCaptureSessionErrorKey];
frameReceiver_->ReceiveError([[error localizedDescription] UTF8String]);
}
diff --git a/chromium/media/video/capture/video_capture.h b/chromium/media/video/capture/video_capture.h
index 3a4eb0e2d32..9a0e94378bb 100644
--- a/chromium/media/video/capture/video_capture.h
+++ b/chromium/media/video/capture/video_capture.h
@@ -45,35 +45,23 @@ class MEDIA_EXPORT VideoCapture {
VideoCapture* capture,
const scoped_refptr<media::VideoFrame>& frame) = 0;
- // Notify client about device info.
- virtual void OnDeviceInfoReceived(
- VideoCapture* capture,
- const VideoCaptureParams& device_info) = 0;
-
- // Notify client about the newly changed device info.
- virtual void OnDeviceInfoChanged(
- VideoCapture* capture,
- const VideoCaptureParams& device_info) {};
-
protected:
virtual ~EventHandler() {}
};
VideoCapture() {}
- // Request video capture to start capturing with |capability|.
+ // Request video capture to start capturing with |params|.
// Also register |handler| with video capture for event handling.
// |handler| must remain valid until it has received |OnRemoved()|.
virtual void StartCapture(EventHandler* handler,
- const VideoCaptureCapability& capability) = 0;
+ const VideoCaptureParams& params) = 0;
// Request video capture to stop capturing for client |handler|.
// |handler| must remain valid until it has received |OnRemoved()|.
virtual void StopCapture(EventHandler* handler) = 0;
virtual bool CaptureStarted() = 0;
- virtual int CaptureWidth() = 0;
- virtual int CaptureHeight() = 0;
virtual int CaptureFrameRate() = 0;
protected:
diff --git a/chromium/media/video/capture/video_capture_device.cc b/chromium/media/video/capture/video_capture_device.cc
index 4175412138f..c370d092c93 100644
--- a/chromium/media/video/capture/video_capture_device.cc
+++ b/chromium/media/video/capture/video_capture_device.cc
@@ -17,34 +17,6 @@ const std::string VideoCaptureDevice::Name::GetNameAndModel() const {
return device_name_ + suffix;
}
-VideoCaptureDevice::Name*
-VideoCaptureDevice::Names::FindById(const std::string& id) {
- for (iterator it = begin(); it != end(); ++it) {
- if (it->id() == id)
- return &(*it);
- }
- return NULL;
-}
-
VideoCaptureDevice::~VideoCaptureDevice() {}
-VideoCaptureDevice1::VideoCaptureDevice1() {}
-
-VideoCaptureDevice1::~VideoCaptureDevice1() {}
-
-void VideoCaptureDevice1::AllocateAndStart(
- const VideoCaptureCapability& capture_format,
- scoped_ptr<EventHandler> client) {
- client_ = client.Pass();
- Allocate(capture_format, client_.get());
- Start();
-}
-
-void VideoCaptureDevice1::StopAndDeAllocate() {
- Stop();
- DeAllocate();
- client_.reset();
-};
-
-
} // namespace media
diff --git a/chromium/media/video/capture/video_capture_device.h b/chromium/media/video/capture/video_capture_device.h
index e7340841cee..295401c3686 100644
--- a/chromium/media/video/capture/video_capture_device.h
+++ b/chromium/media/video/capture/video_capture_device.h
@@ -16,8 +16,11 @@
#include <string>
#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
#include "base/time/time.h"
#include "media/base/media_export.h"
+#include "media/base/video_frame.h"
#include "media/video/capture/video_capture_types.h"
namespace media {
@@ -109,32 +112,44 @@ class MEDIA_EXPORT VideoCaptureDevice {
};
// Manages a list of Name entries.
- class MEDIA_EXPORT Names
- : public NON_EXPORTED_BASE(std::list<Name>) {
+ typedef std::list<Name> Names;
+
+ class MEDIA_EXPORT Client {
public:
- // Returns NULL if no entry was found by that ID.
- Name* FindById(const std::string& id);
+ // Memory buffer returned by Client::ReserveOutputBuffer().
+ class Buffer : public base::RefCountedThreadSafe<Buffer> {
+ public:
+ int id() const { return id_; }
+ void* data() const { return data_; }
+ size_t size() const { return size_; }
- // Allow generated copy constructor and assignment.
- };
+ protected:
+ friend class base::RefCountedThreadSafe<Buffer>;
- class MEDIA_EXPORT EventHandler {
- public:
- virtual ~EventHandler() {}
+ Buffer(int id, void* data, size_t size)
+ : id_(id), data_(data), size_(size) {}
+ virtual ~Buffer() {}
- // Reserve an output buffer into which a video frame can be captured
- // directly. If all buffers are currently busy, returns NULL.
- //
- // The returned VideoFrames will always be allocated with a YV12 format. The
- // size will match that specified by an earlier call to OnFrameInfo. It is
- // the VideoCaptureDevice's responsibility to obey whatever stride and
- // memory layout are indicated on the returned VideoFrame object.
+ const int id_;
+ void* const data_;
+ const size_t size_;
+ };
+
+ virtual ~Client() {}
+
+ // Reserve an output buffer into which contents can be captured directly.
+ // The returned Buffer will always be allocated with a memory size suitable
+ // for holding a packed video frame of |format| format, of |dimensions|
+ // dimensions. It is permissible for |dimensions| to be zero; in which
+ // case the returned Buffer does not guarantee memory backing, but functions
+ // as a reservation for external input for the purposes of buffer
+ // throttling.
//
- // The output buffer stays reserved for use by the calling
- // VideoCaptureDevice until either the last reference to the VideoFrame is
- // released, or until the buffer is passed back to the EventHandler's
- // OnIncomingCapturedFrame() method.
- virtual scoped_refptr<media::VideoFrame> ReserveOutputBuffer() = 0;
+ // The output buffer stays reserved for use until the Buffer object is
+ // destroyed.
+ virtual scoped_refptr<Buffer> ReserveOutputBuffer(
+ media::VideoFrame::Format format,
+ const gfx::Size& dimensions) = 0;
// Captured a new video frame as a raw buffer. The size, color format, and
// layout are taken from the parameters specified by an earlier call to
@@ -144,44 +159,31 @@ class MEDIA_EXPORT VideoCaptureDevice {
// This method will try to reserve an output buffer and copy from |data|
// into the output buffer. If no output buffer is available, the frame will
// be silently dropped.
- virtual void OnIncomingCapturedFrame(const uint8* data,
- int length,
- base::Time timestamp,
- int rotation, // Clockwise.
- bool flip_vert,
- bool flip_horiz) = 0;
-
- // Captured a new video frame, held in a VideoFrame container.
- //
- // If |frame| was created via the ReserveOutputBuffer() mechanism, then the
- // frame delivery is guaranteed (it will not be silently dropped), and
- // delivery will require no additional copies in the browser process. For
- // such frames, the VideoCaptureDevice's reservation on the output buffer
- // ends immediately. The VideoCaptureDevice may not read or write the
- // underlying memory afterwards, and it should release its references to
- // |frame| as soon as possible, to allow buffer reuse.
+ virtual void OnIncomingCapturedFrame(
+ const uint8* data,
+ int length,
+ base::Time timestamp,
+ int rotation, // Clockwise.
+ const VideoCaptureFormat& frame_format) = 0;
+
+ // Captured a new video frame, held in |buffer|.
//
- // If |frame| was NOT created via ReserveOutputBuffer(), then this method
- // will try to reserve an output buffer and copy from |frame| into the
- // output buffer. If no output buffer is available, the frame will be
- // silently dropped. |frame| must be allocated as RGB32, YV12 or I420, and
- // the size must match that specified by an earlier call to OnFrameInfo().
- virtual void OnIncomingCapturedVideoFrame(
- const scoped_refptr<media::VideoFrame>& frame,
- base::Time timestamp) = 0;
+ // As the frame is backed by a reservation returned by
+ // ReserveOutputBuffer(), delivery is guaranteed and will require no
+ // additional copies in the browser process. |dimensions| indicates the
+ // frame width and height of the buffer contents; this is assumed to be of
+ // |format| format and tightly packed.
+ virtual void OnIncomingCapturedBuffer(const scoped_refptr<Buffer>& buffer,
+ media::VideoFrame::Format format,
+ const gfx::Size& dimensions,
+ base::Time timestamp,
+ int frame_rate) = 0;
// An error has occurred that cannot be handled and VideoCaptureDevice must
// be StopAndDeAllocate()-ed.
virtual void OnError() = 0;
-
- // Called when VideoCaptureDevice::AllocateAndStart() has been called to
- // inform of the resulting frame size.
- virtual void OnFrameInfo(const VideoCaptureCapability& info) = 0;
-
- // Called when the native resolution of VideoCaptureDevice has been changed
- // and it needs to inform its client of the new frame size.
- virtual void OnFrameInfoChanged(const VideoCaptureCapability& info) {};
};
+
// Creates a VideoCaptureDevice object.
// Return NULL if the hardware is not available.
static VideoCaptureDevice* Create(const Name& device_name);
@@ -190,13 +192,18 @@ class MEDIA_EXPORT VideoCaptureDevice {
// Gets the names of all video capture devices connected to this computer.
static void GetDeviceNames(Names* device_names);
- // Prepare the camera for use. After this function has been called no other
- // applications can use the camera. On completion EventHandler::OnFrameInfo()
- // is called informing of the resulting resolution and frame rate.
- // StopAndDeAllocate() must be called before the object is deleted.
- virtual void AllocateAndStart(
- const VideoCaptureCapability& capture_format,
- scoped_ptr<EventHandler> client) = 0;
+ // Gets the supported formats of a particular device attached to the system.
+ // This method should be called before allocating or starting a device. In
+ // case format enumeration is not supported, or there was a problem, the
+ // formats array will be empty.
+ static void GetDeviceSupportedFormats(const Name& device,
+ VideoCaptureFormats* supported_formats);
+
+ // Prepares the camera for use. After this function has been called no other
+ // applications can use the camera. StopAndDeAllocate() must be called before
+ // the object is deleted.
+ virtual void AllocateAndStart(const VideoCaptureParams& params,
+ scoped_ptr<Client> client) = 0;
// Deallocates the camera, possibly asynchronously.
//
@@ -212,58 +219,6 @@ class MEDIA_EXPORT VideoCaptureDevice {
virtual void StopAndDeAllocate() = 0;
};
-// VideoCaptureDevice1 is a bridge to an older API against which
-// VideoCaptureDevices were implemented. Differences between VideoCaptureDevice
-// (new style) and VideoCaptureDevice1 (old style) are as follows:
-//
-// [1] The Stop+DeAllocate calls are merged in the new style.
-// [2] The Allocate+Start calls are merged in the new style.
-// [3] New style devices own their EventHandler* pointers, allowing handlers to
-// remain valid even after the device is stopped. Whereas old style devices
-// may not dereference their handlers after DeAllocate().
-// [4] device_name() is eliminated from the new-style interface.
-//
-// TODO(nick): Remove this bridge class. It exists to enable incremental
-// migration to an alternative VideoCaptureDevice API.
-class MEDIA_EXPORT VideoCaptureDevice1 : public VideoCaptureDevice {
- public:
- VideoCaptureDevice1();
- virtual ~VideoCaptureDevice1();
-
- // VideoCaptureDevice implementation.
- virtual void AllocateAndStart(
- const VideoCaptureCapability& capture_format,
- scoped_ptr<EventHandler> client) OVERRIDE;
- virtual void StopAndDeAllocate() OVERRIDE;
-
- // Prepare the camera for use. After this function has been called no other
- // applications can use the camera. On completion EventHandler::OnFrameInfo()
- // is called informing of the resulting resolution and frame rate.
- // DeAllocate() must be called before this function can be called again and
- // before the object is deleted.
- virtual void Allocate(const VideoCaptureCapability& capture_format,
- EventHandler* client) = 0;
-
- // Start capturing video frames. Allocate must be called before this function.
- virtual void Start() = 0;
-
- // Stop capturing video frames.
- virtual void Stop() = 0;
-
- // Deallocates the camera. This means other applications can use it. After
- // this function has been called the capture device is reset to the state it
- // was when created. After DeAllocate() is called, the VideoCaptureDevice is
- // not permitted to make any additional calls to its EventHandler.
- virtual void DeAllocate() = 0;
-
- // Get the name of the capture device.
- virtual const Name& device_name() = 0;
-
- private:
- // The device client which proxies device events to the controller.
- scoped_ptr<EventHandler> client_;
-};
-
} // namespace media
#endif // MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURE_DEVICE_H_
diff --git a/chromium/media/video/capture/video_capture_device_unittest.cc b/chromium/media/video/capture/video_capture_device_unittest.cc
index 586060f169f..5e05ad4b4b2 100644
--- a/chromium/media/video/capture/video_capture_device_unittest.cc
+++ b/chromium/media/video/capture/video_capture_device_unittest.cc
@@ -58,70 +58,87 @@ using ::testing::_;
using ::testing::AnyNumber;
using ::testing::Return;
using ::testing::AtLeast;
-using ::testing::SaveArg;
namespace media {
-class MockFrameObserver : public media::VideoCaptureDevice::EventHandler {
+class MockClient : public media::VideoCaptureDevice::Client {
public:
- MOCK_METHOD0(ReserveOutputBuffer, scoped_refptr<media::VideoFrame>());
+ MOCK_METHOD2(ReserveOutputBuffer,
+ scoped_refptr<Buffer>(media::VideoFrame::Format format,
+ const gfx::Size& dimensions));
MOCK_METHOD0(OnErr, void());
- MOCK_METHOD1(OnFrameInfo, void(const VideoCaptureCapability&));
- MOCK_METHOD1(OnFrameInfoChanged, void(const VideoCaptureCapability&));
- explicit MockFrameObserver(
- base::Closure frame_cb)
- : main_thread_(base::MessageLoopProxy::current()),
- frame_cb_(frame_cb) {}
+ explicit MockClient(base::Callback<void(const VideoCaptureFormat&)> frame_cb)
+ : main_thread_(base::MessageLoopProxy::current()), frame_cb_(frame_cb) {}
virtual void OnError() OVERRIDE {
OnErr();
}
- virtual void OnIncomingCapturedFrame(
- const uint8* data,
- int length,
- base::Time timestamp,
- int rotation,
- bool flip_vert,
- bool flip_horiz) OVERRIDE {
- main_thread_->PostTask(FROM_HERE, frame_cb_);
+ virtual void OnIncomingCapturedFrame(const uint8* data,
+ int length,
+ base::Time timestamp,
+ int rotation,
+ const VideoCaptureFormat& format)
+ OVERRIDE {
+ main_thread_->PostTask(FROM_HERE, base::Bind(frame_cb_, format));
}
- virtual void OnIncomingCapturedVideoFrame(
- const scoped_refptr<media::VideoFrame>& frame,
- base::Time timestamp) OVERRIDE {
- main_thread_->PostTask(FROM_HERE, frame_cb_);
+ virtual void OnIncomingCapturedBuffer(const scoped_refptr<Buffer>& buffer,
+ media::VideoFrame::Format format,
+ const gfx::Size& dimensions,
+ base::Time timestamp,
+ int frame_rate) OVERRIDE {
+ NOTREACHED();
}
private:
scoped_refptr<base::MessageLoopProxy> main_thread_;
- base::Closure frame_cb_;
+ base::Callback<void(const VideoCaptureFormat&)> frame_cb_;
};
class VideoCaptureDeviceTest : public testing::Test {
protected:
- typedef media::VideoCaptureDevice::EventHandler EventHandler;
+ typedef media::VideoCaptureDevice::Client Client;
+
+ VideoCaptureDeviceTest()
+ : loop_(new base::MessageLoop()),
+ client_(
+ new MockClient(base::Bind(&VideoCaptureDeviceTest::OnFrameCaptured,
+ base::Unretained(this)))) {}
virtual void SetUp() {
- loop_.reset(new base::MessageLoopForUI());
- frame_observer_.reset(new MockFrameObserver(loop_->QuitClosure()));
#if defined(OS_ANDROID)
media::VideoCaptureDeviceAndroid::RegisterVideoCaptureDevice(
base::android::AttachCurrentThread());
#endif
}
+ void ResetWithNewClient() {
+ client_.reset(new MockClient(base::Bind(
+ &VideoCaptureDeviceTest::OnFrameCaptured, base::Unretained(this))));
+ }
+
+ void OnFrameCaptured(const VideoCaptureFormat& format) {
+ last_format_ = format;
+ run_loop_->QuitClosure().Run();
+ }
+
void WaitForCapturedFrame() {
- loop_->Run();
+ run_loop_.reset(new base::RunLoop());
+ run_loop_->Run();
}
+ const VideoCaptureFormat& last_format() const { return last_format_; }
+
#if defined(OS_WIN)
base::win::ScopedCOMInitializer initialize_com_;
#endif
- scoped_ptr<MockFrameObserver> frame_observer_;
VideoCaptureDevice::Names names_;
scoped_ptr<base::MessageLoop> loop_;
+ scoped_ptr<base::RunLoop> run_loop_;
+ scoped_ptr<MockClient> client_;
+ VideoCaptureFormat last_format_;
};
TEST_F(VideoCaptureDeviceTest, OpenInvalidDevice) {
@@ -149,27 +166,20 @@ TEST_F(VideoCaptureDeviceTest, CaptureVGA) {
VideoCaptureDevice::Create(names_.front()));
ASSERT_FALSE(device.get() == NULL);
DVLOG(1) << names_.front().id();
- // Get info about the new resolution.
- VideoCaptureCapability rx_capability;
- EXPECT_CALL(*frame_observer_, OnFrameInfo(_))
- .Times(1).WillOnce(SaveArg<0>(&rx_capability));
- EXPECT_CALL(*frame_observer_, OnErr())
+ EXPECT_CALL(*client_, OnErr())
.Times(0);
- VideoCaptureCapability capture_format(640,
- 480,
- 30,
- PIXEL_FORMAT_I420,
- 0,
- false,
- ConstantResolutionVideoCaptureDevice);
- device->AllocateAndStart(capture_format,
- frame_observer_.PassAs<EventHandler>());
+ VideoCaptureParams capture_params;
+ capture_params.requested_format.frame_size.SetSize(640, 480);
+ capture_params.requested_format.frame_rate = 30;
+ capture_params.requested_format.pixel_format = PIXEL_FORMAT_I420;
+ capture_params.allow_resolution_change = false;
+ device->AllocateAndStart(capture_params, client_.PassAs<Client>());
// Get captured video frames.
- loop_->Run();
- EXPECT_EQ(rx_capability.width, 640);
- EXPECT_EQ(rx_capability.height, 480);
+ WaitForCapturedFrame();
+ EXPECT_EQ(last_format().frame_size.width(), 640);
+ EXPECT_EQ(last_format().frame_size.height(), 480);
device->StopAndDeAllocate();
}
@@ -184,24 +194,15 @@ TEST_F(VideoCaptureDeviceTest, Capture720p) {
VideoCaptureDevice::Create(names_.front()));
ASSERT_FALSE(device.get() == NULL);
- // Get info about the new resolution.
- // We don't care about the resulting resolution or frame rate as it might
- // be different from one machine to the next.
- EXPECT_CALL(*frame_observer_, OnFrameInfo(_))
- .Times(1);
-
- EXPECT_CALL(*frame_observer_, OnErr())
+ EXPECT_CALL(*client_, OnErr())
.Times(0);
- VideoCaptureCapability capture_format(1280,
- 720,
- 30,
- PIXEL_FORMAT_I420,
- 0,
- false,
- ConstantResolutionVideoCaptureDevice);
- device->AllocateAndStart(capture_format,
- frame_observer_.PassAs<EventHandler>());
+ VideoCaptureParams capture_params;
+ capture_params.requested_format.frame_size.SetSize(1280, 720);
+ capture_params.requested_format.frame_rate = 30;
+ capture_params.requested_format.pixel_format = PIXEL_FORMAT_I420;
+ capture_params.allow_resolution_change = false;
+ device->AllocateAndStart(capture_params, client_.PassAs<Client>());
// Get captured video frames.
WaitForCapturedFrame();
device->StopAndDeAllocate();
@@ -217,26 +218,19 @@ TEST_F(VideoCaptureDeviceTest, MAYBE_AllocateBadSize) {
VideoCaptureDevice::Create(names_.front()));
ASSERT_TRUE(device.get() != NULL);
- EXPECT_CALL(*frame_observer_, OnErr())
+ EXPECT_CALL(*client_, OnErr())
.Times(0);
- // Get info about the new resolution.
- VideoCaptureCapability rx_capability;
- EXPECT_CALL(*frame_observer_, OnFrameInfo(_))
- .Times(AtLeast(1)).WillOnce(SaveArg<0>(&rx_capability));
-
- VideoCaptureCapability capture_format(637,
- 472,
- 35,
- PIXEL_FORMAT_I420,
- 0,
- false,
- ConstantResolutionVideoCaptureDevice);
- device->AllocateAndStart(capture_format,
- frame_observer_.PassAs<EventHandler>());
+ VideoCaptureParams capture_params;
+ capture_params.requested_format.frame_size.SetSize(637, 472);
+ capture_params.requested_format.frame_rate = 35;
+ capture_params.requested_format.pixel_format = PIXEL_FORMAT_I420;
+ capture_params.allow_resolution_change = false;
+ device->AllocateAndStart(capture_params, client_.PassAs<Client>());
+ WaitForCapturedFrame();
device->StopAndDeAllocate();
- EXPECT_EQ(rx_capability.width, 640);
- EXPECT_EQ(rx_capability.height, 480);
+ EXPECT_EQ(last_format().frame_size.width(), 640);
+ EXPECT_EQ(last_format().frame_size.height(), 480);
}
TEST_F(VideoCaptureDeviceTest, ReAllocateCamera) {
@@ -248,8 +242,7 @@ TEST_F(VideoCaptureDeviceTest, ReAllocateCamera) {
// First, do a number of very fast device start/stops.
for (int i = 0; i <= 5; i++) {
- scoped_ptr<MockFrameObserver> frame_observer(
- new MockFrameObserver(base::Bind(&base::DoNothing)));
+ ResetWithNewClient();
scoped_ptr<VideoCaptureDevice> device(
VideoCaptureDevice::Create(names_.front()));
gfx::Size resolution;
@@ -258,54 +251,32 @@ TEST_F(VideoCaptureDeviceTest, ReAllocateCamera) {
} else {
resolution = gfx::Size(1280, 1024);
}
- VideoCaptureCapability requested_format(
- resolution.width(),
- resolution.height(),
- 30,
- PIXEL_FORMAT_I420,
- 0,
- false,
- ConstantResolutionVideoCaptureDevice);
-
- // The device (if it is an async implementation) may or may not get as far
- // as the OnFrameInfo() step; we're intentionally not going to wait for it
- // to get that far.
- ON_CALL(*frame_observer, OnFrameInfo(_));
- device->AllocateAndStart(requested_format,
- frame_observer.PassAs<EventHandler>());
+ VideoCaptureParams capture_params;
+ capture_params.requested_format.frame_size = resolution;
+ capture_params.requested_format.frame_rate = 30;
+ capture_params.requested_format.pixel_format = PIXEL_FORMAT_I420;
+ capture_params.allow_resolution_change = false;
+ device->AllocateAndStart(capture_params, client_.PassAs<Client>());
device->StopAndDeAllocate();
}
// Finally, do a device start and wait for it to finish.
- gfx::Size resolution;
- VideoCaptureCapability requested_format(
- 320,
- 240,
- 30,
- PIXEL_FORMAT_I420,
- 0,
- false,
- ConstantResolutionVideoCaptureDevice);
-
- base::RunLoop run_loop;
- scoped_ptr<MockFrameObserver> frame_observer(
- new MockFrameObserver(base::Bind(run_loop.QuitClosure())));
+ VideoCaptureParams capture_params;
+ capture_params.requested_format.frame_size.SetSize(320, 240);
+ capture_params.requested_format.frame_rate = 30;
+ capture_params.requested_format.pixel_format = PIXEL_FORMAT_I420;
+ capture_params.allow_resolution_change = false;
+
+ ResetWithNewClient();
scoped_ptr<VideoCaptureDevice> device(
VideoCaptureDevice::Create(names_.front()));
- // The device (if it is an async implementation) may or may not get as far
- // as the OnFrameInfo() step; we're intentionally not going to wait for it
- // to get that far.
- VideoCaptureCapability final_format;
- EXPECT_CALL(*frame_observer, OnFrameInfo(_))
- .Times(1).WillOnce(SaveArg<0>(&final_format));
- device->AllocateAndStart(requested_format,
- frame_observer.PassAs<EventHandler>());
- run_loop.Run(); // Waits for a frame.
+ device->AllocateAndStart(capture_params, client_.PassAs<Client>());
+ WaitForCapturedFrame();
device->StopAndDeAllocate();
device.reset();
- EXPECT_EQ(final_format.width, 320);
- EXPECT_EQ(final_format.height, 240);
+ EXPECT_EQ(last_format().frame_size.width(), 320);
+ EXPECT_EQ(last_format().frame_size.height(), 240);
}
TEST_F(VideoCaptureDeviceTest, DeAllocateCameraWhileRunning) {
@@ -318,27 +289,20 @@ TEST_F(VideoCaptureDeviceTest, DeAllocateCameraWhileRunning) {
VideoCaptureDevice::Create(names_.front()));
ASSERT_TRUE(device.get() != NULL);
- EXPECT_CALL(*frame_observer_, OnErr())
+ EXPECT_CALL(*client_, OnErr())
.Times(0);
- // Get info about the new resolution.
- VideoCaptureCapability rx_capability;
- EXPECT_CALL(*frame_observer_, OnFrameInfo(_))
- .WillOnce(SaveArg<0>(&rx_capability));
-
- VideoCaptureCapability capture_format(640,
- 480,
- 30,
- PIXEL_FORMAT_I420,
- 0,
- false,
- ConstantResolutionVideoCaptureDevice);
- device->AllocateAndStart(capture_format,
- frame_observer_.PassAs<EventHandler>());
+
+ VideoCaptureParams capture_params;
+ capture_params.requested_format.frame_size.SetSize(640, 480);
+ capture_params.requested_format.frame_rate = 30;
+ capture_params.requested_format.pixel_format = PIXEL_FORMAT_I420;
+ capture_params.allow_resolution_change = false;
+ device->AllocateAndStart(capture_params, client_.PassAs<Client>());
// Get captured video frames.
WaitForCapturedFrame();
- EXPECT_EQ(rx_capability.width, 640);
- EXPECT_EQ(rx_capability.height, 480);
- EXPECT_EQ(rx_capability.frame_rate, 30);
+ EXPECT_EQ(last_format().frame_size.width(), 640);
+ EXPECT_EQ(last_format().frame_size.height(), 480);
+ EXPECT_EQ(last_format().frame_rate, 30);
device->StopAndDeAllocate();
}
@@ -353,27 +317,19 @@ TEST_F(VideoCaptureDeviceTest, FakeCapture) {
FakeVideoCaptureDevice::Create(names.front()));
ASSERT_TRUE(device.get() != NULL);
- // Get info about the new resolution.
- VideoCaptureCapability rx_capability;
- EXPECT_CALL(*frame_observer_, OnFrameInfo(_))
- .Times(1).WillOnce(SaveArg<0>(&rx_capability));
-
- EXPECT_CALL(*frame_observer_, OnErr())
+ EXPECT_CALL(*client_, OnErr())
.Times(0);
- VideoCaptureCapability capture_format(640,
- 480,
- 30,
- PIXEL_FORMAT_I420,
- 0,
- false,
- ConstantResolutionVideoCaptureDevice);
- device->AllocateAndStart(capture_format,
- frame_observer_.PassAs<EventHandler>());
+ VideoCaptureParams capture_params;
+ capture_params.requested_format.frame_size.SetSize(640, 480);
+ capture_params.requested_format.frame_rate = 30;
+ capture_params.requested_format.pixel_format = PIXEL_FORMAT_I420;
+ capture_params.allow_resolution_change = false;
+ device->AllocateAndStart(capture_params, client_.PassAs<Client>());
WaitForCapturedFrame();
- EXPECT_EQ(rx_capability.width, 640);
- EXPECT_EQ(rx_capability.height, 480);
- EXPECT_EQ(rx_capability.frame_rate, 30);
+ EXPECT_EQ(last_format().frame_size.width(), 640);
+ EXPECT_EQ(last_format().frame_size.height(), 480);
+ EXPECT_EQ(last_format().frame_rate, 30);
device->StopAndDeAllocate();
}
@@ -388,38 +344,48 @@ TEST_F(VideoCaptureDeviceTest, MAYBE_CaptureMjpeg) {
VideoCaptureDevice::Create(names_.front()));
ASSERT_TRUE(device.get() != NULL);
- EXPECT_CALL(*frame_observer_, OnErr())
+ EXPECT_CALL(*client_, OnErr())
.Times(0);
- // Verify we get MJPEG from the device. Not all devices can capture 1280x720
- // @ 30 fps, so we don't care about the exact resolution we get.
- VideoCaptureCapability rx_capability;
- EXPECT_CALL(*frame_observer_, OnFrameInfo(_))
- .WillOnce(SaveArg<0>(&rx_capability));
-
- VideoCaptureCapability capture_format(1280,
- 720,
- 30,
- PIXEL_FORMAT_MJPEG,
- 0,
- false,
- ConstantResolutionVideoCaptureDevice);
- device->AllocateAndStart(capture_format,
- frame_observer_.PassAs<EventHandler>());
+
+ VideoCaptureParams capture_params;
+ capture_params.requested_format.frame_size.SetSize(1280, 720);
+ capture_params.requested_format.frame_rate = 30;
+ capture_params.requested_format.pixel_format = PIXEL_FORMAT_MJPEG;
+ capture_params.allow_resolution_change = false;
+ device->AllocateAndStart(capture_params, client_.PassAs<Client>());
// Get captured video frames.
WaitForCapturedFrame();
- EXPECT_EQ(rx_capability.color, PIXEL_FORMAT_MJPEG);
+ // Verify we get MJPEG from the device. Not all devices can capture 1280x720
+ // @ 30 fps, so we don't care about the exact resolution we get.
+ EXPECT_EQ(last_format().pixel_format, PIXEL_FORMAT_MJPEG);
device->StopAndDeAllocate();
}
+TEST_F(VideoCaptureDeviceTest, GetDeviceSupportedFormats) {
+ VideoCaptureDevice::GetDeviceNames(&names_);
+ if (!names_.size()) {
+ DVLOG(1) << "No camera available. Exiting test.";
+ return;
+ }
+ VideoCaptureFormats supported_formats;
+ VideoCaptureDevice::Names::iterator names_iterator;
+ for (names_iterator = names_.begin(); names_iterator != names_.end();
+ ++names_iterator) {
+ VideoCaptureDevice::GetDeviceSupportedFormats(*names_iterator,
+ &supported_formats);
+ // Nothing to test here since we cannot forecast the hardware capabilities.
+ }
+}
+
TEST_F(VideoCaptureDeviceTest, FakeCaptureVariableResolution) {
VideoCaptureDevice::Names names;
FakeVideoCaptureDevice::GetDeviceNames(&names);
- media::VideoCaptureCapability capture_format;
- capture_format.width = 640;
- capture_format.height = 480;
- capture_format.frame_rate = 30;
- capture_format.frame_size_type = media::VariableResolutionVideoCaptureDevice;
+ VideoCaptureParams capture_params;
+ capture_params.requested_format.frame_size.SetSize(640, 480);
+ capture_params.requested_format.frame_rate = 30;
+ capture_params.requested_format.pixel_format = PIXEL_FORMAT_I420;
+ capture_params.allow_resolution_change = true;
ASSERT_GT(static_cast<int>(names.size()), 0);
@@ -427,21 +393,12 @@ TEST_F(VideoCaptureDeviceTest, FakeCaptureVariableResolution) {
FakeVideoCaptureDevice::Create(names.front()));
ASSERT_TRUE(device.get() != NULL);
- // Get info about the new resolution.
- EXPECT_CALL(*frame_observer_, OnFrameInfo(_))
- .Times(1);
-
- EXPECT_CALL(*frame_observer_, OnErr())
+ EXPECT_CALL(*client_, OnErr())
.Times(0);
int action_count = 200;
- EXPECT_CALL(*frame_observer_, OnFrameInfoChanged(_))
- .Times(AtLeast(action_count / 30));
- device->AllocateAndStart(capture_format,
- frame_observer_.PassAs<EventHandler>());
+ device->AllocateAndStart(capture_params, client_.PassAs<Client>());
- // The amount of times the OnFrameInfoChanged gets called depends on how often
- // FakeDevice is supposed to change and what is its actual frame rate.
// We set TimeWait to 200 action timeouts and this should be enough for at
// least action_count/kFakeCaptureCapabilityChangePeriod calls.
for (int i = 0; i < action_count; ++i) {
@@ -450,4 +407,27 @@ TEST_F(VideoCaptureDeviceTest, FakeCaptureVariableResolution) {
device->StopAndDeAllocate();
}
+TEST_F(VideoCaptureDeviceTest, FakeGetDeviceSupportedFormats) {
+ VideoCaptureDevice::Names names;
+ FakeVideoCaptureDevice::GetDeviceNames(&names);
+
+ VideoCaptureFormats supported_formats;
+ VideoCaptureDevice::Names::iterator names_iterator;
+
+ for (names_iterator = names.begin(); names_iterator != names.end();
+ ++names_iterator) {
+ FakeVideoCaptureDevice::GetDeviceSupportedFormats(*names_iterator,
+ &supported_formats);
+ EXPECT_EQ(supported_formats.size(), 2u);
+ EXPECT_EQ(supported_formats[0].frame_size.width(), 640);
+ EXPECT_EQ(supported_formats[0].frame_size.height(), 480);
+ EXPECT_EQ(supported_formats[0].pixel_format, media::PIXEL_FORMAT_I420);
+ EXPECT_GE(supported_formats[0].frame_rate, 20);
+ EXPECT_EQ(supported_formats[1].frame_size.width(), 320);
+ EXPECT_EQ(supported_formats[1].frame_size.height(), 240);
+ EXPECT_EQ(supported_formats[1].pixel_format, media::PIXEL_FORMAT_I420);
+ EXPECT_GE(supported_formats[1].frame_rate, 20);
+ }
+}
+
}; // namespace media
diff --git a/chromium/media/video/capture/video_capture_proxy.cc b/chromium/media/video/capture/video_capture_proxy.cc
index 3adbb7ce3b2..d488c50fe02 100644
--- a/chromium/media/video/capture/video_capture_proxy.cc
+++ b/chromium/media/video/capture/video_capture_proxy.cc
@@ -7,6 +7,7 @@
#include "base/bind.h"
#include "base/location.h"
#include "base/message_loop/message_loop_proxy.h"
+#include "media/base/video_frame.h"
namespace {
@@ -16,8 +17,6 @@ media::VideoCaptureHandlerProxy::VideoCaptureState GetState(
media::VideoCapture* capture) {
media::VideoCaptureHandlerProxy::VideoCaptureState state;
state.started = capture->CaptureStarted();
- state.width = capture->CaptureWidth();
- state.height = capture->CaptureHeight();
state.frame_rate = capture->CaptureFrameRate();
return state;
}
@@ -89,17 +88,6 @@ void VideoCaptureHandlerProxy::OnFrameReady(
frame));
}
-void VideoCaptureHandlerProxy::OnDeviceInfoReceived(
- VideoCapture* capture,
- const VideoCaptureParams& device_info) {
- main_message_loop_->PostTask(FROM_HERE, base::Bind(
- &VideoCaptureHandlerProxy::OnDeviceInfoReceivedOnMainThread,
- base::Unretained(this),
- capture,
- GetState(capture),
- device_info));
-}
-
void VideoCaptureHandlerProxy::OnStartedOnMainThread(
VideoCapture* capture,
const VideoCaptureState& state) {
@@ -144,12 +132,4 @@ void VideoCaptureHandlerProxy::OnFrameReadyOnMainThread(
proxied_->OnFrameReady(capture, frame);
}
-void VideoCaptureHandlerProxy::OnDeviceInfoReceivedOnMainThread(
- VideoCapture* capture,
- const VideoCaptureState& state,
- const VideoCaptureParams& device_info) {
- state_ = state;
- proxied_->OnDeviceInfoReceived(capture, device_info);
-}
-
} // namespace media
diff --git a/chromium/media/video/capture/video_capture_proxy.h b/chromium/media/video/capture/video_capture_proxy.h
index fbb75776abe..fca0a80add7 100644
--- a/chromium/media/video/capture/video_capture_proxy.h
+++ b/chromium/media/video/capture/video_capture_proxy.h
@@ -28,10 +28,8 @@ class MEDIA_EXPORT VideoCaptureHandlerProxy
: public VideoCapture::EventHandler {
public:
struct VideoCaptureState {
- VideoCaptureState() : started(false), width(0), height(0), frame_rate(0) {}
+ VideoCaptureState() : started(false), frame_rate(0) {}
bool started;
- int width;
- int height;
int frame_rate;
};
@@ -52,9 +50,6 @@ class MEDIA_EXPORT VideoCaptureHandlerProxy
virtual void OnRemoved(VideoCapture* capture) OVERRIDE;
virtual void OnFrameReady(VideoCapture* capture,
const scoped_refptr<VideoFrame>& frame) OVERRIDE;
- virtual void OnDeviceInfoReceived(
- VideoCapture* capture,
- const VideoCaptureParams& device_info) OVERRIDE;
private:
// Called on main thread.
@@ -77,9 +72,6 @@ class MEDIA_EXPORT VideoCaptureHandlerProxy
void OnFrameReadyOnMainThread(VideoCapture* capture,
const VideoCaptureState& state,
const scoped_refptr<VideoFrame>& frame);
- void OnDeviceInfoReceivedOnMainThread(VideoCapture* capture,
- const VideoCaptureState& state,
- const VideoCaptureParams& device_info);
// Only accessed from main thread.
VideoCapture::EventHandler* proxied_;
diff --git a/chromium/media/video/capture/video_capture_types.cc b/chromium/media/video/capture/video_capture_types.cc
index 5b8e2265360..aee3865a57b 100644
--- a/chromium/media/video/capture/video_capture_types.cc
+++ b/chromium/media/video/capture/video_capture_types.cc
@@ -9,52 +9,26 @@
namespace media {
VideoCaptureFormat::VideoCaptureFormat()
- : width(0),
- height(0),
- frame_rate(0),
- frame_size_type(ConstantResolutionVideoCaptureDevice) {}
-
-VideoCaptureFormat::VideoCaptureFormat(
- int width,
- int height,
- int frame_rate,
- VideoCaptureResolutionType frame_size_type)
- : width(width),
- height(height),
+ : frame_rate(0), pixel_format(PIXEL_FORMAT_UNKNOWN) {}
+
+VideoCaptureFormat::VideoCaptureFormat(const gfx::Size& frame_size,
+ int frame_rate,
+ VideoPixelFormat pixel_format)
+ : frame_size(frame_size),
frame_rate(frame_rate),
- frame_size_type(frame_size_type) {}
+ pixel_format(pixel_format) {}
bool VideoCaptureFormat::IsValid() const {
- return (width > 0) && (height > 0) && (frame_rate > 0) &&
+ return (frame_size.width() < media::limits::kMaxDimension) &&
+ (frame_size.height() < media::limits::kMaxDimension) &&
+ (frame_size.GetArea() > 0) &&
+ (frame_size.GetArea() < media::limits::kMaxCanvas) &&
+ (frame_rate > 0) &&
(frame_rate < media::limits::kMaxFramesPerSecond) &&
- (width < media::limits::kMaxDimension) &&
- (height < media::limits::kMaxDimension) &&
- (width * height < media::limits::kMaxCanvas) &&
- (frame_size_type >= 0) &&
- (frame_size_type < media::MaxVideoCaptureResolutionType);
+ (pixel_format >= PIXEL_FORMAT_UNKNOWN) &&
+ (pixel_format < PIXEL_FORMAT_MAX);
}
-VideoCaptureParams::VideoCaptureParams()
- : session_id(0) {}
-
-VideoCaptureCapability::VideoCaptureCapability()
- : color(PIXEL_FORMAT_UNKNOWN),
- expected_capture_delay(0),
- interlaced(false),
- session_id(0) {}
-
-VideoCaptureCapability::VideoCaptureCapability(
- int width,
- int height,
- int frame_rate,
- VideoPixelFormat color,
- int delay,
- bool interlaced,
- VideoCaptureResolutionType frame_size_type)
- : VideoCaptureFormat(width, height, frame_rate, frame_size_type),
- color(color),
- expected_capture_delay(delay),
- interlaced(interlaced),
- session_id(0) {}
+VideoCaptureParams::VideoCaptureParams() : allow_resolution_change(false) {}
} // namespace media
diff --git a/chromium/media/video/capture/video_capture_types.h b/chromium/media/video/capture/video_capture_types.h
index 1a170aaf5e7..6a4f453280b 100644
--- a/chromium/media/video/capture/video_capture_types.h
+++ b/chromium/media/video/capture/video_capture_types.h
@@ -5,7 +5,10 @@
#ifndef MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURE_TYPES_H_
#define MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURE_TYPES_H_
-#include "media/base/video_frame.h"
+#include <vector>
+
+#include "media/base/media_export.h"
+#include "ui/gfx/size.h"
namespace media {
@@ -13,12 +16,6 @@ namespace media {
// shared with device manager.
typedef int VideoCaptureSessionId;
-enum VideoCaptureResolutionType {
- ConstantResolutionVideoCaptureDevice = 0,
- VariableResolutionVideoCaptureDevice,
- MaxVideoCaptureResolutionType, // Must be last.
-};
-
// Color formats from camera.
enum VideoPixelFormat {
PIXEL_FORMAT_UNKNOWN, // Color format not set.
@@ -30,51 +27,44 @@ enum VideoPixelFormat {
PIXEL_FORMAT_MJPEG,
PIXEL_FORMAT_NV21,
PIXEL_FORMAT_YV12,
+ PIXEL_FORMAT_MAX,
};
// Video capture format specification.
+// This class is used by the video capture device to specify the format of every
+// frame captured and returned to a client. It is also used to specify a
+// supported capture format by a device.
class MEDIA_EXPORT VideoCaptureFormat {
public:
VideoCaptureFormat();
- VideoCaptureFormat(int width,
- int height,
+ VideoCaptureFormat(const gfx::Size& frame_size,
int frame_rate,
- VideoCaptureResolutionType frame_size_type);
+ VideoPixelFormat pixel_format);
// Checks that all values are in the expected range. All limits are specified
// in media::Limits.
bool IsValid() const;
- int width;
- int height;
+ gfx::Size frame_size;
int frame_rate;
- VideoCaptureResolutionType frame_size_type;
+ VideoPixelFormat pixel_format;
};
-// Parameters for starting video capture and device information.
-class MEDIA_EXPORT VideoCaptureParams : public VideoCaptureFormat {
+typedef std::vector<VideoCaptureFormat> VideoCaptureFormats;
+
+// Parameters for starting video capture.
+// This class is used by the client of a video capture device to specify the
+// format of frames in which the client would like to have captured frames
+// returned.
+class MEDIA_EXPORT VideoCaptureParams {
public:
VideoCaptureParams();
- VideoCaptureSessionId session_id;
-};
-
-// Capabilities describe the format a camera capture video in.
-class MEDIA_EXPORT VideoCaptureCapability : public VideoCaptureFormat {
- public:
- VideoCaptureCapability();
- VideoCaptureCapability(int width,
- int height,
- int frame_rate,
- VideoPixelFormat color,
- int delay,
- bool interlaced,
- VideoCaptureResolutionType frame_size_type);
+ // Requests a resolution and format at which the capture will occur.
+ VideoCaptureFormat requested_format;
- VideoPixelFormat color; // Desired video type.
- int expected_capture_delay; // Expected delay in millisecond.
- bool interlaced; // Need interlace format.
- VideoCaptureSessionId session_id;
+ // Allow mid-capture resolution change.
+ bool allow_resolution_change;
};
} // namespace media
diff --git a/chromium/media/video/capture/win/capability_list_win.cc b/chromium/media/video/capture/win/capability_list_win.cc
index 18325bb6398..bfa58edcc4b 100644
--- a/chromium/media/video/capture/win/capability_list_win.cc
+++ b/chromium/media/video/capture/win/capability_list_win.cc
@@ -33,7 +33,8 @@ bool CompareFrameRate(const ResolutionDiff& item1,
}
bool CompareColor(const ResolutionDiff& item1, const ResolutionDiff& item2) {
- return item1.capability->color < item2.capability->color;
+ return item1.capability->supported_format.pixel_format <
+ item2.capability->supported_format.pixel_format;
}
} // namespace.
@@ -50,7 +51,7 @@ void CapabilityList::Add(const VideoCaptureCapabilityWin& capability) {
capabilities_.push_back(capability);
}
-const VideoCaptureCapabilityWin& CapabilityList::GetBestMatchedCapability(
+const VideoCaptureCapabilityWin& CapabilityList::GetBestMatchedFormat(
int requested_width,
int requested_height,
int requested_frame_rate) const {
@@ -65,8 +66,9 @@ const VideoCaptureCapabilityWin& CapabilityList::GetBestMatchedCapability(
it != capabilities_.end(); ++it) {
ResolutionDiff diff;
diff.capability = &(*it);
- diff.diff_width = it->width - requested_width;
- diff.diff_height = it->height - requested_height;
+ diff.diff_width = it->supported_format.frame_size.width() - requested_width;
+ diff.diff_height =
+ it->supported_format.frame_size.height() - requested_height;
// The 1000 allows using integer arithmetic for f.i. 29.971 fps.
diff.diff_frame_rate =
1000 * ((static_cast<float>(it->frame_rate_numerator) /
diff --git a/chromium/media/video/capture/win/capability_list_win.h b/chromium/media/video/capture/win/capability_list_win.h
index c07b220b0d5..bf1e8d6ee89 100644
--- a/chromium/media/video/capture/win/capability_list_win.h
+++ b/chromium/media/video/capture/win/capability_list_win.h
@@ -11,12 +11,13 @@
#include <list>
+#include "base/basictypes.h"
#include "base/threading/non_thread_safe.h"
#include "media/video/capture/video_capture_types.h"
namespace media {
-struct VideoCaptureCapabilityWin : public VideoCaptureCapability {
+struct VideoCaptureCapabilityWin {
explicit VideoCaptureCapabilityWin(int index)
: stream_index(index),
frame_rate_numerator(0),
@@ -26,6 +27,7 @@ struct VideoCaptureCapabilityWin : public VideoCaptureCapability {
// so framerates can be properly represented, f.i. 29.971fps= 30000/1001.
int frame_rate_numerator;
int frame_rate_denominator;
+ VideoCaptureFormat supported_format;
};
class CapabilityList : public base::NonThreadSafe {
@@ -41,8 +43,9 @@ class CapabilityList : public base::NonThreadSafe {
// Loops through the list of capabilities and returns an index of the best
// matching capability. The algorithm prioritizes height, width, frame rate
// and color format in that order.
- const VideoCaptureCapabilityWin& GetBestMatchedCapability(
- int requested_width, int requested_height,
+ const VideoCaptureCapabilityWin& GetBestMatchedFormat(
+ int requested_width,
+ int requested_height,
int requested_frame_rate) const;
private:
diff --git a/chromium/media/video/capture/win/sink_filter_win.cc b/chromium/media/video/capture/win/sink_filter_win.cc
index c3fc410dd7f..e3bb0a58564 100644
--- a/chromium/media/video/capture/win/sink_filter_win.cc
+++ b/chromium/media/video/capture/win/sink_filter_win.cc
@@ -28,13 +28,12 @@ SinkFilter::~SinkFilter() {
input_pin_->SetOwner(NULL);
}
-void SinkFilter::SetRequestedMediaCapability(
- const VideoCaptureCapability& capability) {
- input_pin_->SetRequestedMediaCapability(capability);
+void SinkFilter::SetRequestedMediaFormat(const VideoCaptureFormat& format) {
+ input_pin_->SetRequestedMediaFormat(format);
}
-const VideoCaptureCapability& SinkFilter::ResultingCapability() {
- return input_pin_->ResultingCapability();
+const VideoCaptureFormat& SinkFilter::ResultingFormat() {
+ return input_pin_->ResultingFormat();
}
size_t SinkFilter::NoOfPins() {
diff --git a/chromium/media/video/capture/win/sink_filter_win.h b/chromium/media/video/capture/win/sink_filter_win.h
index 36bb124cc9d..e454f0b984b 100644
--- a/chromium/media/video/capture/win/sink_filter_win.h
+++ b/chromium/media/video/capture/win/sink_filter_win.h
@@ -32,11 +32,10 @@ class __declspec(uuid("88cdbbdc-a73b-4afa-acbf-15d5e2ce12c3"))
explicit SinkFilter(SinkFilterObserver* observer);
virtual ~SinkFilter();
- void SetRequestedMediaCapability(
- const VideoCaptureCapability& capability);
- // Returns the capability that is negotiated when this
+ void SetRequestedMediaFormat(const VideoCaptureFormat& format);
+ // Returns the format that is negotiated when this
// filter is connected to a media filter.
- const VideoCaptureCapability& ResultingCapability();
+ const VideoCaptureFormat& ResultingFormat();
// Implement FilterBase.
virtual size_t NoOfPins();
diff --git a/chromium/media/video/capture/win/sink_input_pin_win.cc b/chromium/media/video/capture/win/sink_input_pin_win.cc
index 1de1ea1671a..0126e13db8f 100644
--- a/chromium/media/video/capture/win/sink_input_pin_win.cc
+++ b/chromium/media/video/capture/win/sink_input_pin_win.cc
@@ -20,8 +20,6 @@ SinkInputPin::SinkInputPin(IBaseFilter* filter,
SinkFilterObserver* observer)
: observer_(observer),
PinBase(filter) {
- memset(&requested_capability_, 0, sizeof(requested_capability_));
- memset(&resulting_capability_, 0, sizeof(resulting_capability_));
}
SinkInputPin::~SinkInputPin() {}
@@ -38,9 +36,9 @@ bool SinkInputPin::GetValidMediaType(int index, AM_MEDIA_TYPE* media_type) {
pvi->bmiHeader.biPlanes = 1;
pvi->bmiHeader.biClrImportant = 0;
pvi->bmiHeader.biClrUsed = 0;
- if (requested_capability_.frame_rate > 0) {
- pvi->AvgTimePerFrame = kSecondsToReferenceTime /
- requested_capability_.frame_rate;
+ if (requested_format_.frame_rate > 0) {
+ pvi->AvgTimePerFrame =
+ kSecondsToReferenceTime / requested_format_.frame_rate;
}
media_type->majortype = MEDIATYPE_Video;
@@ -51,30 +49,28 @@ bool SinkInputPin::GetValidMediaType(int index, AM_MEDIA_TYPE* media_type) {
case 0: {
pvi->bmiHeader.biCompression = MAKEFOURCC('I', '4', '2', '0');
pvi->bmiHeader.biBitCount = 12; // bit per pixel
- pvi->bmiHeader.biWidth = requested_capability_.width;
- pvi->bmiHeader.biHeight = requested_capability_.height;
- pvi->bmiHeader.biSizeImage = 3 * requested_capability_.height *
- requested_capability_.width / 2;
+ pvi->bmiHeader.biWidth = requested_format_.frame_size.width();
+ pvi->bmiHeader.biHeight = requested_format_.frame_size.height();
+ pvi->bmiHeader.biSizeImage =
+ requested_format_.frame_size.GetArea() * 3 / 2;
media_type->subtype = kMediaSubTypeI420;
break;
}
case 1: {
pvi->bmiHeader.biCompression = MAKEFOURCC('Y', 'U', 'Y', '2');
pvi->bmiHeader.biBitCount = 16;
- pvi->bmiHeader.biWidth = requested_capability_.width;
- pvi->bmiHeader.biHeight = requested_capability_.height;
- pvi->bmiHeader.biSizeImage = 2 * requested_capability_.width *
- requested_capability_.height;
+ pvi->bmiHeader.biWidth = requested_format_.frame_size.width();
+ pvi->bmiHeader.biHeight = requested_format_.frame_size.height();
+ pvi->bmiHeader.biSizeImage = requested_format_.frame_size.GetArea() * 2;
media_type->subtype = MEDIASUBTYPE_YUY2;
break;
}
case 2: {
pvi->bmiHeader.biCompression = BI_RGB;
pvi->bmiHeader.biBitCount = 24;
- pvi->bmiHeader.biWidth = requested_capability_.width;
- pvi->bmiHeader.biHeight = requested_capability_.height;
- pvi->bmiHeader.biSizeImage = 3 * requested_capability_.height *
- requested_capability_.width;
+ pvi->bmiHeader.biWidth = requested_format_.frame_size.width();
+ pvi->bmiHeader.biHeight = requested_format_.frame_size.height();
+ pvi->bmiHeader.biSizeImage = requested_format_.frame_size.GetArea() * 3;
media_type->subtype = MEDIASUBTYPE_RGB24;
break;
}
@@ -104,27 +100,27 @@ bool SinkInputPin::IsMediaTypeValid(const AM_MEDIA_TYPE* media_type) {
return false;
// Store the incoming width and height.
- resulting_capability_.width = pvi->bmiHeader.biWidth;
- resulting_capability_.height = abs(pvi->bmiHeader.biHeight);
+ resulting_format_.frame_size.SetSize(pvi->bmiHeader.biWidth,
+ abs(pvi->bmiHeader.biHeight));
if (pvi->AvgTimePerFrame > 0) {
- resulting_capability_.frame_rate =
+ resulting_format_.frame_rate =
static_cast<int>(kSecondsToReferenceTime / pvi->AvgTimePerFrame);
} else {
- resulting_capability_.frame_rate = requested_capability_.frame_rate;
+ resulting_format_.frame_rate = requested_format_.frame_rate;
}
if (sub_type == kMediaSubTypeI420 &&
pvi->bmiHeader.biCompression == MAKEFOURCC('I', '4', '2', '0')) {
- resulting_capability_.color = PIXEL_FORMAT_I420;
+ resulting_format_.pixel_format = PIXEL_FORMAT_I420;
return true; // This format is acceptable.
}
if (sub_type == MEDIASUBTYPE_YUY2 &&
pvi->bmiHeader.biCompression == MAKEFOURCC('Y', 'U', 'Y', '2')) {
- resulting_capability_.color = PIXEL_FORMAT_YUY2;
+ resulting_format_.pixel_format = PIXEL_FORMAT_YUY2;
return true; // This format is acceptable.
}
if (sub_type == MEDIASUBTYPE_RGB24 &&
pvi->bmiHeader.biCompression == BI_RGB) {
- resulting_capability_.color = PIXEL_FORMAT_RGB24;
+ resulting_format_.pixel_format = PIXEL_FORMAT_RGB24;
return true; // This format is acceptable.
}
return false;
@@ -140,19 +136,15 @@ HRESULT SinkInputPin::Receive(IMediaSample* sample) {
return S_OK;
}
-void SinkInputPin::SetRequestedMediaCapability(
- const VideoCaptureCapability& capability) {
- requested_capability_ = capability;
- resulting_capability_.width = 0;
- resulting_capability_.height = 0;
- resulting_capability_.frame_rate = 0;
- resulting_capability_.color = PIXEL_FORMAT_UNKNOWN;
- resulting_capability_.expected_capture_delay = 0;
- resulting_capability_.interlaced = false;
+void SinkInputPin::SetRequestedMediaFormat(const VideoCaptureFormat& format) {
+ requested_format_ = format;
+ resulting_format_.frame_size.SetSize(0, 0);
+ resulting_format_.frame_rate = 0;
+ resulting_format_.pixel_format = PIXEL_FORMAT_UNKNOWN;
}
-const VideoCaptureCapability& SinkInputPin::ResultingCapability() {
- return resulting_capability_;
+const VideoCaptureFormat& SinkInputPin::ResultingFormat() {
+ return resulting_format_;
}
} // namespace media
diff --git a/chromium/media/video/capture/win/sink_input_pin_win.h b/chromium/media/video/capture/win/sink_input_pin_win.h
index 16168a39a1e..f14ca33073c 100644
--- a/chromium/media/video/capture/win/sink_input_pin_win.h
+++ b/chromium/media/video/capture/win/sink_input_pin_win.h
@@ -24,10 +24,10 @@ class SinkInputPin : public PinBase {
SinkInputPin(IBaseFilter* filter, SinkFilterObserver* observer);
virtual ~SinkInputPin();
- void SetRequestedMediaCapability(const VideoCaptureCapability& capability);
+ void SetRequestedMediaFormat(const VideoCaptureFormat& format);
// Returns the capability that is negotiated when this
// pin is connected to a media filter.
- const VideoCaptureCapability& ResultingCapability();
+ const VideoCaptureFormat& ResultingFormat();
// Implement PinBase.
virtual bool IsMediaTypeValid(const AM_MEDIA_TYPE* media_type);
@@ -36,8 +36,8 @@ class SinkInputPin : public PinBase {
STDMETHOD(Receive)(IMediaSample* media_sample);
private:
- VideoCaptureCapability requested_capability_;
- VideoCaptureCapability resulting_capability_;
+ VideoCaptureFormat requested_format_;
+ VideoCaptureFormat resulting_format_;
SinkFilterObserver* observer_;
DISALLOW_IMPLICIT_CONSTRUCTORS(SinkInputPin);
diff --git a/chromium/media/video/capture/win/video_capture_device_mf_win.cc b/chromium/media/video/capture/win/video_capture_device_mf_win.cc
index 874408fb2cd..cc1e7505dbe 100644
--- a/chromium/media/video/capture/win/video_capture_device_mf_win.cc
+++ b/chromium/media/video/capture/win/video_capture_device_mf_win.cc
@@ -94,12 +94,11 @@ bool FormatFromGuid(const GUID& guid, VideoPixelFormat* format) {
return false;
}
-bool GetFrameSize(IMFMediaType* type, int* width, int* height) {
+bool GetFrameSize(IMFMediaType* type, gfx::Size* frame_size) {
UINT32 width32, height32;
if (FAILED(MFGetAttributeSize(type, MF_MT_FRAME_SIZE, &width32, &height32)))
return false;
- *width = width32;
- *height = height32;
+ frame_size->SetSize(width32, height32);
return true;
}
@@ -121,20 +120,17 @@ bool FillCapabilitiesFromType(IMFMediaType* type,
VideoCaptureCapabilityWin* capability) {
GUID type_guid;
if (FAILED(type->GetGUID(MF_MT_SUBTYPE, &type_guid)) ||
- !FormatFromGuid(type_guid, &capability->color) ||
- !GetFrameSize(type, &capability->width, &capability->height) ||
+ !GetFrameSize(type, &capability->supported_format.frame_size) ||
!GetFrameRate(type,
&capability->frame_rate_numerator,
- &capability->frame_rate_denominator)) {
+ &capability->frame_rate_denominator) ||
+ !FormatFromGuid(type_guid, &capability->supported_format.pixel_format)) {
return false;
}
// Keep the integer version of the frame_rate for (potential) returns.
- capability->frame_rate =
+ capability->supported_format.frame_rate =
capability->frame_rate_numerator / capability->frame_rate_denominator;
- capability->expected_capture_delay = 0; // Currently not used.
- capability->interlaced = false; // Currently not used.
-
return true;
}
@@ -210,7 +206,7 @@ class MFReaderCallback
DWORD stream_flags, LONGLONG time_stamp, IMFSample* sample) {
base::Time stamp(base::Time::Now());
if (!sample) {
- observer_->OnIncomingCapturedFrame(NULL, 0, stamp, 0, false, false);
+ observer_->OnIncomingCapturedFrame(NULL, 0, stamp, 0);
return S_OK;
}
@@ -224,8 +220,7 @@ class MFReaderCallback
DWORD length = 0, max_length = 0;
BYTE* data = NULL;
buffer->Lock(&data, &max_length, &length);
- observer_->OnIncomingCapturedFrame(data, length, stamp,
- 0, false, false);
+ observer_->OnIncomingCapturedFrame(data, length, stamp, 0);
buffer->Unlock();
}
}
@@ -312,7 +307,7 @@ const std::string VideoCaptureDevice::Name::GetModel() const {
}
VideoCaptureDeviceMFWin::VideoCaptureDeviceMFWin(const Name& device_name)
- : name_(device_name), observer_(NULL), capture_(0) {
+ : name_(device_name), capture_(0) {
DetachFromThread();
}
@@ -339,19 +334,14 @@ bool VideoCaptureDeviceMFWin::Init() {
reader_.Receive()));
}
-void VideoCaptureDeviceMFWin::Allocate(
- const VideoCaptureCapability& capture_format,
- VideoCaptureDevice::EventHandler* observer) {
+void VideoCaptureDeviceMFWin::AllocateAndStart(
+ const VideoCaptureParams& params,
+ scoped_ptr<VideoCaptureDevice::Client> client) {
DCHECK(CalledOnValidThread());
base::AutoLock lock(lock_);
- if (observer_) {
- DCHECK_EQ(observer, observer_);
- return;
- }
-
- observer_ = observer;
+ client_ = client.Pass();
DCHECK_EQ(capture_, false);
CapabilityList capabilities;
@@ -361,14 +351,11 @@ void VideoCaptureDeviceMFWin::Allocate(
return;
}
- const VideoCaptureCapabilityWin& found_capability =
- capabilities.GetBestMatchedCapability(capture_format.width,
- capture_format.height,
- capture_format.frame_rate);
- DLOG(INFO) << "Chosen capture format= (" << found_capability.width << "x"
- << found_capability.height << ")@("
- << found_capability.frame_rate_numerator << "/"
- << found_capability.frame_rate_denominator << ")fps";
+ VideoCaptureCapabilityWin found_capability =
+ capabilities.GetBestMatchedFormat(
+ params.requested_format.frame_size.width(),
+ params.requested_format.frame_size.height(),
+ params.requested_format.frame_rate);
ScopedComPtr<IMFMediaType> type;
if (FAILED(hr = reader_->GetNativeMediaType(
@@ -380,25 +367,16 @@ void VideoCaptureDeviceMFWin::Allocate(
return;
}
- observer_->OnFrameInfo(found_capability);
-}
-
-void VideoCaptureDeviceMFWin::Start() {
- DCHECK(CalledOnValidThread());
-
- base::AutoLock lock(lock_);
- if (!capture_) {
- capture_ = true;
- HRESULT hr;
- if (FAILED(hr = reader_->ReadSample(MF_SOURCE_READER_FIRST_VIDEO_STREAM, 0,
- NULL, NULL, NULL, NULL))) {
- OnError(hr);
- capture_ = false;
- }
+ if (FAILED(hr = reader_->ReadSample(MF_SOURCE_READER_FIRST_VIDEO_STREAM, 0,
+ NULL, NULL, NULL, NULL))) {
+ OnError(hr);
+ return;
}
+ capture_format_ = found_capability.supported_format;
+ capture_ = true;
}
-void VideoCaptureDeviceMFWin::Stop() {
+void VideoCaptureDeviceMFWin::StopAndDeAllocate() {
DCHECK(CalledOnValidThread());
base::WaitableEvent flushed(false, false);
const int kFlushTimeOutInMs = 1000;
@@ -412,9 +390,9 @@ void VideoCaptureDeviceMFWin::Stop() {
wait = SUCCEEDED(hr);
if (!wait) {
callback_->SetSignalOnFlush(NULL);
- OnError(hr);
}
}
+ client_.reset();
}
// If the device has been unplugged, the Flush() won't trigger the event
@@ -426,31 +404,18 @@ void VideoCaptureDeviceMFWin::Stop() {
flushed.TimedWait(base::TimeDelta::FromMilliseconds(kFlushTimeOutInMs));
}
-void VideoCaptureDeviceMFWin::DeAllocate() {
- DCHECK(CalledOnValidThread());
-
- Stop();
-
- base::AutoLock lock(lock_);
- observer_ = NULL;
-}
-
-const VideoCaptureDevice::Name& VideoCaptureDeviceMFWin::device_name() {
- DCHECK(CalledOnValidThread());
- return name_;
-}
-
void VideoCaptureDeviceMFWin::OnIncomingCapturedFrame(
const uint8* data,
int length,
const base::Time& time_stamp,
- int rotation,
- bool flip_vert,
- bool flip_horiz) {
+ int rotation) {
base::AutoLock lock(lock_);
- if (data && observer_)
- observer_->OnIncomingCapturedFrame(data, length, time_stamp,
- rotation, flip_vert, flip_horiz);
+ if (data && client_.get())
+ client_->OnIncomingCapturedFrame(data,
+ length,
+ time_stamp,
+ rotation,
+ capture_format_);
if (capture_) {
HRESULT hr = reader_->ReadSample(MF_SOURCE_READER_FIRST_VIDEO_STREAM, 0,
@@ -468,8 +433,8 @@ void VideoCaptureDeviceMFWin::OnIncomingCapturedFrame(
void VideoCaptureDeviceMFWin::OnError(HRESULT hr) {
DLOG(ERROR) << "VideoCaptureDeviceMFWin: " << std::hex << hr;
- if (observer_)
- observer_->OnError();
+ if (client_.get())
+ client_->OnError();
}
} // namespace media
diff --git a/chromium/media/video/capture/win/video_capture_device_mf_win.h b/chromium/media/video/capture/win/video_capture_device_mf_win.h
index 2daa03535dd..8f7fc75cf45 100644
--- a/chromium/media/video/capture/win/video_capture_device_mf_win.h
+++ b/chromium/media/video/capture/win/video_capture_device_mf_win.h
@@ -28,7 +28,7 @@ class MFReaderCallback;
class MEDIA_EXPORT VideoCaptureDeviceMFWin
: public base::NonThreadSafe,
- public VideoCaptureDevice1 {
+ public VideoCaptureDevice {
public:
explicit VideoCaptureDeviceMFWin(const Name& device_name);
virtual ~VideoCaptureDeviceMFWin();
@@ -38,12 +38,10 @@ class MEDIA_EXPORT VideoCaptureDeviceMFWin
bool Init();
// VideoCaptureDevice implementation.
- virtual void Allocate(const VideoCaptureCapability& capture_format,
- VideoCaptureDevice::EventHandler* observer) OVERRIDE;
- virtual void Start() OVERRIDE;
- virtual void Stop() OVERRIDE;
- virtual void DeAllocate() OVERRIDE;
- virtual const Name& device_name() OVERRIDE;
+ virtual void AllocateAndStart(const VideoCaptureParams& params,
+ scoped_ptr<VideoCaptureDevice::Client> client)
+ OVERRIDE;
+ virtual void StopAndDeAllocate() OVERRIDE;
// Returns true iff the current platform supports the Media Foundation API
// and that the DLLs are available. On Vista this API is an optional download
@@ -59,9 +57,7 @@ class MEDIA_EXPORT VideoCaptureDeviceMFWin
const uint8* data,
int length,
const base::Time& time_stamp,
- int rotation,
- bool flip_vert,
- bool flip_horiz);
+ int rotation);
private:
void OnError(HRESULT hr);
@@ -71,8 +67,9 @@ class MEDIA_EXPORT VideoCaptureDeviceMFWin
scoped_refptr<MFReaderCallback> callback_;
base::Lock lock_; // Used to guard the below variables.
- VideoCaptureDevice::EventHandler* observer_;
+ scoped_ptr<VideoCaptureDevice::Client> client_;
base::win::ScopedComPtr<IMFSourceReader> reader_;
+ VideoCaptureFormat capture_format_;
bool capture_;
DISALLOW_IMPLICIT_CONSTRUCTORS(VideoCaptureDeviceMFWin);
diff --git a/chromium/media/video/capture/win/video_capture_device_win.cc b/chromium/media/video/capture/win/video_capture_device_win.cc
index 307ab2967bf..00056a70168 100644
--- a/chromium/media/video/capture/win/video_capture_device_win.cc
+++ b/chromium/media/video/capture/win/video_capture_device_win.cc
@@ -11,10 +11,12 @@
#include "base/strings/string_util.h"
#include "base/strings/sys_string_conversions.h"
#include "base/win/metro.h"
+#include "base/win/scoped_co_mem.h"
#include "base/win/scoped_variant.h"
#include "media/base/media_switches.h"
#include "media/video/capture/win/video_capture_device_mf_win.h"
+using base::win::ScopedCoMem;
using base::win::ScopedComPtr;
using base::win::ScopedVariant;
@@ -163,6 +165,12 @@ void VideoCaptureDevice::GetDeviceNames(Names* device_names) {
}
// static
+void VideoCaptureDevice::GetDeviceSupportedFormats(const Name& device,
+ VideoCaptureFormats* formats) {
+ NOTIMPLEMENTED();
+}
+
+// static
VideoCaptureDevice* VideoCaptureDevice::Create(const Name& device_name) {
VideoCaptureDevice* ret = NULL;
if (device_name.capture_api_type() == Name::MEDIA_FOUNDATION) {
@@ -257,8 +265,7 @@ void VideoCaptureDeviceWin::GetDeviceNames(Names* device_names) {
VideoCaptureDeviceWin::VideoCaptureDeviceWin(const Name& device_name)
: device_name_(device_name),
- state_(kIdle),
- observer_(NULL) {
+ state_(kIdle) {
DetachFromThread();
}
@@ -333,26 +340,27 @@ bool VideoCaptureDeviceWin::Init() {
return CreateCapabilityMap();
}
-void VideoCaptureDeviceWin::Allocate(
- const VideoCaptureCapability& capture_format,
- VideoCaptureDevice::EventHandler* observer) {
+void VideoCaptureDeviceWin::AllocateAndStart(
+ const VideoCaptureParams& params,
+ scoped_ptr<VideoCaptureDevice::Client> client) {
DCHECK(CalledOnValidThread());
if (state_ != kIdle)
return;
- observer_ = observer;
+ client_ = client.Pass();
// Get the camera capability that best match the requested resolution.
const VideoCaptureCapabilityWin& found_capability =
- capabilities_.GetBestMatchedCapability(capture_format.width,
- capture_format.height,
- capture_format.frame_rate);
- VideoCaptureCapability capability = found_capability;
+ capabilities_.GetBestMatchedFormat(
+ params.requested_format.frame_size.width(),
+ params.requested_format.frame_size.height(),
+ params.requested_format.frame_rate);
+ VideoCaptureFormat format = found_capability.supported_format;
// Reduce the frame rate if the requested frame rate is lower
// than the capability.
- if (capability.frame_rate > capture_format.frame_rate)
- capability.frame_rate = capture_format.frame_rate;
+ if (format.frame_rate > params.requested_format.frame_rate)
+ format.frame_rate = params.requested_format.frame_rate;
AM_MEDIA_TYPE* pmt = NULL;
VIDEO_STREAM_CONFIG_CAPS caps;
@@ -370,20 +378,19 @@ void VideoCaptureDeviceWin::Allocate(
if (SUCCEEDED(hr)) {
if (pmt->formattype == FORMAT_VideoInfo) {
VIDEOINFOHEADER* h = reinterpret_cast<VIDEOINFOHEADER*>(pmt->pbFormat);
- if (capability.frame_rate > 0)
- h->AvgTimePerFrame = kSecondsToReferenceTime / capability.frame_rate;
+ if (format.frame_rate > 0)
+ h->AvgTimePerFrame = kSecondsToReferenceTime / format.frame_rate;
}
- // Set the sink filter to request this capability.
- sink_filter_->SetRequestedMediaCapability(capability);
- // Order the capture device to use this capability.
+ // Set the sink filter to request this format.
+ sink_filter_->SetRequestedMediaFormat(format);
+ // Order the capture device to use this format.
hr = stream_config->SetFormat(pmt);
}
if (FAILED(hr))
SetErrorState("Failed to set capture device output format");
- if (capability.color == PIXEL_FORMAT_MJPEG &&
- !mjpg_filter_.get()) {
+ if (format.pixel_format == PIXEL_FORMAT_MJPEG && !mjpg_filter_.get()) {
// Create MJPG filter if we need it.
hr = mjpg_filter_.CreateInstance(CLSID_MjpegDec, NULL, CLSCTX_INPROC);
@@ -401,8 +408,7 @@ void VideoCaptureDeviceWin::Allocate(
}
}
- if (capability.color == PIXEL_FORMAT_MJPEG &&
- mjpg_filter_.get()) {
+ if (format.pixel_format == PIXEL_FORMAT_MJPEG && mjpg_filter_.get()) {
// Connect the camera to the MJPEG decoder.
hr = graph_builder_->ConnectDirect(output_capture_pin_, input_mjpg_pin_,
NULL);
@@ -426,21 +432,12 @@ void VideoCaptureDeviceWin::Allocate(
return;
}
- // Get the capability back from the sink filter after the filter have been
+ // Get the format back from the sink filter after the filter have been
// connected.
- const VideoCaptureCapability& used_capability
- = sink_filter_->ResultingCapability();
- observer_->OnFrameInfo(used_capability);
+ capture_format_ = sink_filter_->ResultingFormat();
- state_ = kAllocated;
-}
-
-void VideoCaptureDeviceWin::Start() {
- DCHECK(CalledOnValidThread());
- if (state_ != kAllocated)
- return;
-
- HRESULT hr = media_control_->Run();
+ // Start capturing.
+ hr = media_control_->Run();
if (FAILED(hr)) {
SetErrorState("Failed to start the Capture device.");
return;
@@ -449,7 +446,7 @@ void VideoCaptureDeviceWin::Start() {
state_ = kCapturing;
}
-void VideoCaptureDeviceWin::Stop() {
+void VideoCaptureDeviceWin::StopAndDeAllocate() {
DCHECK(CalledOnValidThread());
if (state_ != kCapturing)
return;
@@ -460,15 +457,6 @@ void VideoCaptureDeviceWin::Stop() {
return;
}
- state_ = kAllocated;
-}
-
-void VideoCaptureDeviceWin::DeAllocate() {
- DCHECK(CalledOnValidThread());
- if (state_ == kIdle)
- return;
-
- HRESULT hr = media_control_->Stop();
graph_builder_->Disconnect(output_capture_pin_);
graph_builder_->Disconnect(input_sink_pin_);
@@ -482,20 +470,15 @@ void VideoCaptureDeviceWin::DeAllocate() {
SetErrorState("Failed to Stop the Capture device");
return;
}
-
+ client_.reset();
state_ = kIdle;
}
-const VideoCaptureDevice::Name& VideoCaptureDeviceWin::device_name() {
- DCHECK(CalledOnValidThread());
- return device_name_;
-}
-
// Implements SinkFilterObserver::SinkFilterObserver.
void VideoCaptureDeviceWin::FrameReceived(const uint8* buffer,
int length) {
- observer_->OnIncomingCapturedFrame(buffer, length, base::Time::Now(),
- 0, false, false);
+ client_->OnIncomingCapturedFrame(
+ buffer, length, base::Time::Now(), 0, capture_format_);
}
bool VideoCaptureDeviceWin::CreateCapabilityMap() {
@@ -526,7 +509,9 @@ bool VideoCaptureDeviceWin::CreateCapabilityMap() {
for (int i = 0; i < count; ++i) {
hr = stream_config->GetStreamCaps(i, &media_type,
reinterpret_cast<BYTE*>(&caps));
- if (FAILED(hr)) {
+ // GetStreamCaps() may return S_FALSE, so don't use FAILED() or SUCCEED()
+ // macros here since they'll trigger incorrectly.
+ if (hr != S_OK) {
DVLOG(2) << "Failed to GetStreamCaps";
return false;
}
@@ -534,21 +519,19 @@ bool VideoCaptureDeviceWin::CreateCapabilityMap() {
if (media_type->majortype == MEDIATYPE_Video &&
media_type->formattype == FORMAT_VideoInfo) {
VideoCaptureCapabilityWin capability(i);
- REFERENCE_TIME time_per_frame = 0;
-
VIDEOINFOHEADER* h =
reinterpret_cast<VIDEOINFOHEADER*>(media_type->pbFormat);
- capability.width = h->bmiHeader.biWidth;
- capability.height = h->bmiHeader.biHeight;
- time_per_frame = h->AvgTimePerFrame;
+ capability.supported_format.frame_size.SetSize(h->bmiHeader.biWidth,
+ h->bmiHeader.biHeight);
- // Try to get the max frame rate from IAMVideoControl.
+ // Try to get a better |time_per_frame| from IAMVideoControl. If not, use
+ // the value from VIDEOINFOHEADER.
+ REFERENCE_TIME time_per_frame = h->AvgTimePerFrame;
if (video_control) {
- LONGLONG* max_fps_ptr;
- LONG list_size;
- SIZE size;
- size.cx = capability.width;
- size.cy = capability.height;
+ ScopedCoMem<LONGLONG> max_fps;
+ LONG list_size = 0;
+ SIZE size = {capability.supported_format.frame_size.width(),
+ capability.supported_format.frame_size.height()};
// GetFrameRateList doesn't return max frame rate always
// eg: Logitech Notebook. This may be due to a bug in that API
@@ -556,45 +539,42 @@ bool VideoCaptureDeviceWin::CreateCapabilityMap() {
// a util method written. Can't assume the first value will return
// the max fps.
hr = video_control->GetFrameRateList(output_capture_pin_, i, size,
- &list_size, &max_fps_ptr);
-
- if (SUCCEEDED(hr) && list_size > 0) {
- int min_time = *std::min_element(max_fps_ptr,
- max_fps_ptr + list_size);
- capability.frame_rate = (min_time > 0) ?
- kSecondsToReferenceTime / min_time : 0;
- } else {
- // Get frame rate from VIDEOINFOHEADER.
- capability.frame_rate = (time_per_frame > 0) ?
- static_cast<int>(kSecondsToReferenceTime / time_per_frame) : 0;
+ &list_size, &max_fps);
+ // Sometimes |list_size| will be > 0, but max_fps will be NULL. Some
+ // drivers may return an HRESULT of S_FALSE which SUCCEEDED() translates
+ // into success, so explicitly check S_OK. See http://crbug.com/306237.
+ if (hr == S_OK && list_size > 0 && max_fps) {
+ time_per_frame = *std::min_element(max_fps.get(),
+ max_fps.get() + list_size);
}
- } else {
- // Get frame rate from VIDEOINFOHEADER since IAMVideoControl is
- // not supported.
- capability.frame_rate = (time_per_frame > 0) ?
- static_cast<int>(kSecondsToReferenceTime / time_per_frame) : 0;
}
+
+ capability.supported_format.frame_rate =
+ (time_per_frame > 0)
+ ? static_cast<int>(kSecondsToReferenceTime / time_per_frame)
+ : 0;
+
// DirectShow works at the moment only on integer frame_rate but the
// best capability matching class works on rational frame rates.
- capability.frame_rate_numerator = capability.frame_rate;
+ capability.frame_rate_numerator = capability.supported_format.frame_rate;
capability.frame_rate_denominator = 1;
// We can't switch MEDIATYPE :~(.
if (media_type->subtype == kMediaSubTypeI420) {
- capability.color = PIXEL_FORMAT_I420;
+ capability.supported_format.pixel_format = PIXEL_FORMAT_I420;
} else if (media_type->subtype == MEDIASUBTYPE_IYUV) {
// This is identical to PIXEL_FORMAT_I420.
- capability.color = PIXEL_FORMAT_I420;
+ capability.supported_format.pixel_format = PIXEL_FORMAT_I420;
} else if (media_type->subtype == MEDIASUBTYPE_RGB24) {
- capability.color = PIXEL_FORMAT_RGB24;
+ capability.supported_format.pixel_format = PIXEL_FORMAT_RGB24;
} else if (media_type->subtype == MEDIASUBTYPE_YUY2) {
- capability.color = PIXEL_FORMAT_YUY2;
+ capability.supported_format.pixel_format = PIXEL_FORMAT_YUY2;
} else if (media_type->subtype == MEDIASUBTYPE_MJPG) {
- capability.color = PIXEL_FORMAT_MJPEG;
+ capability.supported_format.pixel_format = PIXEL_FORMAT_MJPEG;
} else if (media_type->subtype == MEDIASUBTYPE_UYVY) {
- capability.color = PIXEL_FORMAT_UYVY;
+ capability.supported_format.pixel_format = PIXEL_FORMAT_UYVY;
} else if (media_type->subtype == MEDIASUBTYPE_ARGB32) {
- capability.color = PIXEL_FORMAT_ARGB;
+ capability.supported_format.pixel_format = PIXEL_FORMAT_ARGB;
} else {
WCHAR guid_str[128];
StringFromGUID2(media_type->subtype, guid_str, arraysize(guid_str));
@@ -614,6 +594,6 @@ void VideoCaptureDeviceWin::SetErrorState(const char* reason) {
DCHECK(CalledOnValidThread());
DVLOG(1) << reason;
state_ = kError;
- observer_->OnError();
+ client_->OnError();
}
} // namespace media
diff --git a/chromium/media/video/capture/win/video_capture_device_win.h b/chromium/media/video/capture/win/video_capture_device_win.h
index 4c83d6b3062..164c01c9e26 100644
--- a/chromium/media/video/capture/win/video_capture_device_win.h
+++ b/chromium/media/video/capture/win/video_capture_device_win.h
@@ -30,7 +30,7 @@ namespace media {
// All the methods in the class can only be run on a COM initialized thread.
class VideoCaptureDeviceWin
: public base::NonThreadSafe,
- public VideoCaptureDevice1,
+ public VideoCaptureDevice,
public SinkFilterObserver {
public:
explicit VideoCaptureDeviceWin(const Name& device_name);
@@ -40,19 +40,16 @@ class VideoCaptureDeviceWin
bool Init();
// VideoCaptureDevice implementation.
- virtual void Allocate(const VideoCaptureCapability& capture_format,
- VideoCaptureDevice::EventHandler* observer) OVERRIDE;
- virtual void Start() OVERRIDE;
- virtual void Stop() OVERRIDE;
- virtual void DeAllocate() OVERRIDE;
- virtual const Name& device_name() OVERRIDE;
+ virtual void AllocateAndStart(const VideoCaptureParams& params,
+ scoped_ptr<VideoCaptureDevice::Client> client)
+ OVERRIDE;
+ virtual void StopAndDeAllocate() OVERRIDE;
static void GetDeviceNames(Names* device_names);
private:
enum InternalState {
kIdle, // The device driver is opened but camera is not in use.
- kAllocated, // The camera has been allocated and can be started.
kCapturing, // Video is being captured.
kError // Error accessing HW functions.
// User needs to recover by destroying the object.
@@ -66,7 +63,7 @@ class VideoCaptureDeviceWin
Name device_name_;
InternalState state_;
- VideoCaptureDevice::EventHandler* observer_;
+ scoped_ptr<VideoCaptureDevice::Client> client_;
base::win::ScopedComPtr<IBaseFilter> capture_filter_;
base::win::ScopedComPtr<IGraphBuilder> graph_builder_;
@@ -82,6 +79,7 @@ class VideoCaptureDeviceWin
// Map of all capabilities this device support.
CapabilityList capabilities_;
+ VideoCaptureFormat capture_format_;
DISALLOW_IMPLICIT_CONSTRUCTORS(VideoCaptureDeviceWin);
};
diff --git a/chromium/media/video/video_decode_accelerator.h b/chromium/media/video/video_decode_accelerator.h
index 1aa0954ba96..5212db2c488 100644
--- a/chromium/media/video/video_decode_accelerator.h
+++ b/chromium/media/video/video_decode_accelerator.h
@@ -90,9 +90,9 @@ class MEDIA_EXPORT VideoDecodeAccelerator
// Returns true when command successfully accepted. Otherwise false.
virtual bool Initialize(VideoCodecProfile profile) = 0;
- // Decodes given bitstream buffer. Once decoder is done with processing
- // |bitstream_buffer| it will call NotifyEndOfBitstreamBuffer() with the
- // bitstream buffer id.
+ // Decodes given bitstream buffer that contains at most one frame. Once
+ // decoder is done with processing |bitstream_buffer| it will call
+ // NotifyEndOfBitstreamBuffer() with the bitstream buffer id.
// Parameters:
// |bitstream_buffer| is the input bitstream that is sent for decoding.
virtual void Decode(const BitstreamBuffer& bitstream_buffer) = 0;
diff --git a/chromium/media/webm/chromeos/ebml_writer.cc b/chromium/media/webm/chromeos/ebml_writer.cc
index 5c5f07db718..84a9760769f 100644
--- a/chromium/media/webm/chromeos/ebml_writer.cc
+++ b/chromium/media/webm/chromeos/ebml_writer.cc
@@ -7,7 +7,7 @@
#include "media/base/media_export.h"
extern "C" {
-#include "third_party/libvpx/source/libvpx/libmkv/EbmlWriter.h"
+#include "third_party/libvpx/source/libvpx/third_party/libmkv/EbmlWriter.h"
EbmlGlobal::EbmlGlobal() {
}
diff --git a/chromium/media/webm/chromeos/webm_encoder.cc b/chromium/media/webm/chromeos/webm_encoder.cc
index af4d871cebb..059f9c6fef6 100644
--- a/chromium/media/webm/chromeos/webm_encoder.cc
+++ b/chromium/media/webm/chromeos/webm_encoder.cc
@@ -16,8 +16,8 @@ extern "C" {
// Getting the right degree of C compatibility has been a constant struggle.
// - Stroustrup, C++ Report, 12(7), July/August 2000.
#define private priv
-#include "third_party/libvpx/source/libvpx/libmkv/EbmlIDs.h"
-#include "third_party/libvpx/source/libvpx/libmkv/EbmlWriter.h"
+#include "third_party/libvpx/source/libvpx/third_party/libmkv/EbmlIDs.h"
+#include "third_party/libvpx/source/libvpx/third_party/libmkv/EbmlWriter.h"
#undef private
}
@@ -157,7 +157,7 @@ bool WebmEncoder::EncodeFromSprite(const SkBitmap& sprite,
}
bool WebmEncoder::WriteWebmHeader() {
- output_ = file_util::OpenFile(output_path_, "wb");
+ output_ = base::OpenFile(output_path_, "wb");
if (!output_)
return false;
@@ -251,7 +251,7 @@ bool WebmEncoder::WriteWebmFooter() {
EndSubElement(); // Cluster
EndSubElement(); // Segment
DCHECK(ebml_sub_elements_.empty());
- return file_util::CloseFile(output_) && !has_errors_;
+ return base::CloseFile(output_) && !has_errors_;
}
void WebmEncoder::StartSubElement(unsigned long class_id) {
diff --git a/chromium/media/webm/tracks_builder.cc b/chromium/media/webm/tracks_builder.cc
index 370ca82a188..3ad59530514 100644
--- a/chromium/media/webm/tracks_builder.cc
+++ b/chromium/media/webm/tracks_builder.cc
@@ -118,10 +118,12 @@ TracksBuilder::~TracksBuilder() {}
void TracksBuilder::AddTrack(
int track_num,
int track_type,
+ int track_uid,
const std::string& codec_id,
const std::string& name,
const std::string& language) {
- tracks_.push_back(Track(track_num, track_type, codec_id, name, language));
+ tracks_.push_back(Track(track_num, track_type, track_uid, codec_id, name,
+ language));
}
std::vector<uint8> TracksBuilder::Finish() {
@@ -159,12 +161,13 @@ void TracksBuilder::WriteTracks(uint8* buf, int buf_size) const {
}
}
-TracksBuilder::Track::Track(int track_num, int track_type,
+TracksBuilder::Track::Track(int track_num, int track_type, int track_uid,
const std::string& codec_id,
const std::string& name,
const std::string& language)
: track_num_(track_num),
track_type_(track_type),
+ track_uid_(track_uid),
codec_id_(codec_id),
name_(name),
language_(language) {
@@ -179,6 +182,7 @@ int TracksBuilder::Track::GetPayloadSize() const {
size += IntElementSize(kWebMIdTrackNumber, track_num_);
size += IntElementSize(kWebMIdTrackType, track_type_);
+ size += IntElementSize(kWebMIdTrackUID, track_uid_);
if (!codec_id_.empty())
size += StringElementSize(kWebMIdCodecID, codec_id_);
@@ -197,6 +201,7 @@ void TracksBuilder::Track::Write(uint8** buf, int* buf_size) const {
WriteIntElement(buf, buf_size, kWebMIdTrackNumber, track_num_);
WriteIntElement(buf, buf_size, kWebMIdTrackType, track_type_);
+ WriteIntElement(buf, buf_size, kWebMIdTrackUID, track_uid_);
if (!codec_id_.empty())
WriteStringElement(buf, buf_size, kWebMIdCodecID, codec_id_);
diff --git a/chromium/media/webm/tracks_builder.h b/chromium/media/webm/tracks_builder.h
index 87ceaed3adc..fca9dfe1dce 100644
--- a/chromium/media/webm/tracks_builder.h
+++ b/chromium/media/webm/tracks_builder.h
@@ -18,8 +18,9 @@ class TracksBuilder {
TracksBuilder();
~TracksBuilder();
- void AddTrack(int track_num, int track_type, const std::string& codec_id,
- const std::string& name, const std::string& language);
+ void AddTrack(int track_num, int track_type, int track_uid,
+ const std::string& codec_id, const std::string& name,
+ const std::string& language);
std::vector<uint8> Finish();
@@ -30,8 +31,9 @@ class TracksBuilder {
class Track {
public:
- Track(int track_num, int track_type, const std::string& codec_id,
- const std::string& name, const std::string& language);
+ Track(int track_num, int track_type, int track_uid,
+ const std::string& codec_id, const std::string& name,
+ const std::string& language);
int GetSize() const;
void Write(uint8** buf, int* buf_size) const;
@@ -40,6 +42,7 @@ class TracksBuilder {
int track_num_;
int track_type_;
+ int track_uid_;
std::string codec_id_;
std::string name_;
std::string language_;
diff --git a/chromium/media/webm/webm_cluster_parser.cc b/chromium/media/webm/webm_cluster_parser.cc
index 87cccae4da1..df9e4ce244b 100644
--- a/chromium/media/webm/webm_cluster_parser.cc
+++ b/chromium/media/webm/webm_cluster_parser.cc
@@ -10,8 +10,10 @@
#include "base/sys_byteorder.h"
#include "media/base/buffers.h"
#include "media/base/decrypt_config.h"
+#include "media/filters/webvtt_util.h"
#include "media/webm/webm_constants.h"
#include "media/webm/webm_crypto_helpers.h"
+#include "media/webm/webm_webvtt_parser.h"
namespace media {
@@ -307,6 +309,7 @@ bool WebMClusterParser::OnBlock(bool is_simple_block, int track_num,
}
Track* track = NULL;
+ bool is_text = false;
std::string encryption_key_id;
if (track_num == audio_.track_num()) {
track = &audio_;
@@ -322,6 +325,7 @@ bool WebMClusterParser::OnBlock(bool is_simple_block, int track_num,
if (block_duration < 0) // not specified
return false;
track = text_track;
+ is_text = true;
} else {
MEDIA_LOG(log_cb_) << "Unexpected track number " << track_num;
return false;
@@ -339,9 +343,28 @@ bool WebMClusterParser::OnBlock(bool is_simple_block, int track_num,
bool is_keyframe =
is_simple_block ? (flags & 0x80) != 0 : track->IsKeyframe(data, size);
- scoped_refptr<StreamParserBuffer> buffer =
- StreamParserBuffer::CopyFrom(data, size, additional, additional_size,
- is_keyframe);
+ scoped_refptr<StreamParserBuffer> buffer;
+ if (!is_text) {
+ buffer = StreamParserBuffer::CopyFrom(data, size,
+ additional, additional_size,
+ is_keyframe);
+ } else {
+ std::string id, settings, content;
+ WebMWebVTTParser::Parse(data, size,
+ &id, &settings, &content);
+
+ std::vector<uint8> side_data;
+ MakeSideData(id.begin(), id.end(),
+ settings.begin(), settings.end(),
+ &side_data);
+
+ buffer = StreamParserBuffer::CopyFrom(
+ reinterpret_cast<const uint8*>(content.data()),
+ content.length(),
+ &side_data[0],
+ side_data.size(),
+ is_keyframe);
+ }
// Every encrypted Block has a signal byte and IV prepended to it. Current
// encrypted WebM request for comments specification is here
diff --git a/chromium/media/webm/webm_cluster_parser_unittest.cc b/chromium/media/webm/webm_cluster_parser_unittest.cc
index 5c5837fa866..691325d7403 100644
--- a/chromium/media/webm/webm_cluster_parser_unittest.cc
+++ b/chromium/media/webm/webm_cluster_parser_unittest.cc
@@ -374,11 +374,10 @@ TEST_F(WebMClusterParserTest, IgnoredTracks) {
TEST_F(WebMClusterParserTest, ParseTextTracks) {
typedef WebMTracksParser::TextTracks TextTracks;
TextTracks text_tracks;
- WebMTracksParser::TextTrackInfo text_track_info;
- text_track_info.kind = kTextSubtitles;
text_tracks.insert(std::make_pair(TextTracks::key_type(kTextTrackNum),
- text_track_info));
+ TextTrackConfig(kTextSubtitles, "", "",
+ "")));
parser_.reset(new WebMClusterParser(kTimecodeScale,
kAudioTrackNum,
@@ -410,12 +409,11 @@ TEST_F(WebMClusterParserTest, ParseTextTracks) {
TEST_F(WebMClusterParserTest, TextTracksSimpleBlock) {
typedef WebMTracksParser::TextTracks TextTracks;
- TextTracks text_tracks;
- WebMTracksParser::TextTrackInfo text_track_info;
+ WebMTracksParser::TextTracks text_tracks;
- text_track_info.kind = kTextSubtitles;
text_tracks.insert(std::make_pair(TextTracks::key_type(kTextTrackNum),
- text_track_info));
+ TextTrackConfig(kTextSubtitles, "", "",
+ "")));
parser_.reset(new WebMClusterParser(kTimecodeScale,
kAudioTrackNum,
@@ -441,18 +439,17 @@ TEST_F(WebMClusterParserTest, TextTracksSimpleBlock) {
TEST_F(WebMClusterParserTest, ParseMultipleTextTracks) {
typedef WebMTracksParser::TextTracks TextTracks;
TextTracks text_tracks;
- WebMTracksParser::TextTrackInfo text_track_info;
const int kSubtitleTextTrackNum = kTextTrackNum;
const int kCaptionTextTrackNum = kTextTrackNum + 1;
- text_track_info.kind = kTextSubtitles;
text_tracks.insert(std::make_pair(TextTracks::key_type(kSubtitleTextTrackNum),
- text_track_info));
+ TextTrackConfig(kTextSubtitles, "", "",
+ "")));
- text_track_info.kind = kTextCaptions;
text_tracks.insert(std::make_pair(TextTracks::key_type(kCaptionTextTrackNum),
- text_track_info));
+ TextTrackConfig(kTextCaptions, "", "",
+ "")));
parser_.reset(new WebMClusterParser(kTimecodeScale,
kAudioTrackNum,
diff --git a/chromium/media/webm/webm_stream_parser.cc b/chromium/media/webm/webm_stream_parser.cc
index 12be4492684..8e7d055e68c 100644
--- a/chromium/media/webm/webm_stream_parser.cc
+++ b/chromium/media/webm/webm_stream_parser.cc
@@ -8,7 +8,6 @@
#include "base/callback.h"
#include "base/logging.h"
-#include "base/stl_util.h"
#include "media/webm/webm_cluster_parser.h"
#include "media/webm/webm_constants.h"
#include "media/webm/webm_content_encodings.h"
@@ -24,7 +23,6 @@ WebMStreamParser::WebMStreamParser()
}
WebMStreamParser::~WebMStreamParser() {
- STLDeleteValues(&text_track_map_);
}
void WebMStreamParser::Init(const InitCB& init_cb,
@@ -32,7 +30,6 @@ void WebMStreamParser::Init(const InitCB& init_cb,
const NewBuffersCB& new_buffers_cb,
const NewTextBuffersCB& text_cb,
const NeedKeyCB& need_key_cb,
- const AddTextTrackCB& add_text_track_cb,
const NewMediaSegmentCB& new_segment_cb,
const base::Closure& end_of_segment_cb,
const LogCB& log_cb) {
@@ -41,7 +38,6 @@ void WebMStreamParser::Init(const InitCB& init_cb,
DCHECK(!init_cb.is_null());
DCHECK(!config_cb.is_null());
DCHECK(!new_buffers_cb.is_null());
- DCHECK(!text_cb.is_null());
DCHECK(!need_key_cb.is_null());
DCHECK(!new_segment_cb.is_null());
DCHECK(!end_of_segment_cb.is_null());
@@ -52,7 +48,6 @@ void WebMStreamParser::Init(const InitCB& init_cb,
new_buffers_cb_ = new_buffers_cb;
text_cb_ = text_cb;
need_key_cb_ = need_key_cb;
- add_text_track_cb_ = add_text_track_cb;
new_segment_cb_ = new_segment_cb;
end_of_segment_cb_ = end_of_segment_cb;
log_cb_ = log_cb;
@@ -175,7 +170,7 @@ int WebMStreamParser::ParseInfoAndTracks(const uint8* data, int size) {
cur_size -= result;
bytes_parsed += result;
- WebMTracksParser tracks_parser(log_cb_, add_text_track_cb_.is_null());
+ WebMTracksParser tracks_parser(log_cb_, text_cb_.is_null());
result = tracks_parser.Parse(cur, cur_size);
if (result <= 0)
@@ -199,37 +194,18 @@ int WebMStreamParser::ParseInfoAndTracks(const uint8* data, int size) {
if (video_config.is_encrypted())
FireNeedKey(tracks_parser.video_encryption_key_id());
- if (!config_cb_.Run(audio_config, video_config)) {
+ if (!config_cb_.Run(audio_config,
+ video_config,
+ tracks_parser.text_tracks())) {
DVLOG(1) << "New config data isn't allowed.";
return -1;
}
- typedef WebMTracksParser::TextTracks TextTracks;
- const TextTracks& text_tracks = tracks_parser.text_tracks();
-
- for (TextTracks::const_iterator itr = text_tracks.begin();
- itr != text_tracks.end(); ++itr) {
- const WebMTracksParser::TextTrackInfo& text_track_info = itr->second;
-
- // TODO(matthewjheaney): verify that WebVTT uses ISO 639-2 for lang
- scoped_ptr<TextTrack> text_track =
- add_text_track_cb_.Run(text_track_info.kind,
- text_track_info.name,
- text_track_info.language);
-
- // Assume ownership of pointer, and cache the text track object, for use
- // later when we have text track buffers. (The text track objects are
- // deallocated in the dtor for this class.)
-
- if (text_track)
- text_track_map_.insert(std::make_pair(itr->first, text_track.release()));
- }
-
cluster_parser_.reset(new WebMClusterParser(
info_parser.timecode_scale(),
tracks_parser.audio_track_num(),
tracks_parser.video_track_num(),
- text_tracks,
+ tracks_parser.text_tracks(),
tracks_parser.ignored_tracks(),
tracks_parser.audio_encryption_key_id(),
tracks_parser.video_encryption_key_id(),
@@ -301,14 +277,7 @@ int WebMStreamParser::ParseCluster(const uint8* data, int size) {
const BufferQueue* text_buffers;
while (text_track_iter(&text_track_num, &text_buffers)) {
- TextTrackMap::iterator find_result = text_track_map_.find(text_track_num);
-
- if (find_result == text_track_map_.end())
- continue;
-
- TextTrack* const text_track = find_result->second;
-
- if (!text_buffers->empty() && !text_cb_.Run(text_track, *text_buffers))
+ if (!text_buffers->empty() && !text_cb_.Run(text_track_num, *text_buffers))
return -1;
}
diff --git a/chromium/media/webm/webm_stream_parser.h b/chromium/media/webm/webm_stream_parser.h
index 9c3a6d544fc..aec484b580e 100644
--- a/chromium/media/webm/webm_stream_parser.h
+++ b/chromium/media/webm/webm_stream_parser.h
@@ -5,8 +5,6 @@
#ifndef MEDIA_WEBM_WEBM_STREAM_PARSER_H_
#define MEDIA_WEBM_WEBM_STREAM_PARSER_H_
-#include <map>
-
#include "base/callback_forward.h"
#include "base/memory/ref_counted.h"
#include "media/base/audio_decoder_config.h"
@@ -29,7 +27,6 @@ class WebMStreamParser : public StreamParser {
const NewBuffersCB& new_buffers_cb,
const NewTextBuffersCB& text_cb,
const NeedKeyCB& need_key_cb,
- const AddTextTrackCB& add_text_track_cb,
const NewMediaSegmentCB& new_segment_cb,
const base::Closure& end_of_segment_cb,
const LogCB& log_cb) OVERRIDE;
@@ -74,10 +71,6 @@ class WebMStreamParser : public StreamParser {
NewBuffersCB new_buffers_cb_;
NewTextBuffersCB text_cb_;
NeedKeyCB need_key_cb_;
- AddTextTrackCB add_text_track_cb_;
-
- typedef std::map<int, TextTrack* > TextTrackMap;
- TextTrackMap text_track_map_;
NewMediaSegmentCB new_segment_cb_;
base::Closure end_of_segment_cb_;
diff --git a/chromium/media/webm/webm_tracks_parser.cc b/chromium/media/webm/webm_tracks_parser.cc
index aa28d6feef9..771480fdf55 100644
--- a/chromium/media/webm/webm_tracks_parser.cc
+++ b/chromium/media/webm/webm_tracks_parser.cc
@@ -5,6 +5,7 @@
#include "media/webm/webm_tracks_parser.h"
#include "base/logging.h"
+#include "base/strings/string_number_conversions.h"
#include "base/strings/string_util.h"
#include "media/base/buffers.h"
#include "media/webm/webm_constants.h"
@@ -31,6 +32,7 @@ static TextKind CodecIdToTextKind(const std::string& codec_id) {
WebMTracksParser::WebMTracksParser(const LogCB& log_cb, bool ignore_text_tracks)
: track_type_(-1),
track_num_(-1),
+ track_uid_(-1),
seek_preroll_(-1),
codec_delay_(-1),
audio_track_num_(-1),
@@ -46,6 +48,7 @@ WebMTracksParser::~WebMTracksParser() {}
int WebMTracksParser::Parse(const uint8* buf, int size) {
track_type_ =-1;
track_num_ = -1;
+ track_uid_ = -1;
track_name_.clear();
track_language_.clear();
audio_track_num_ = -1;
@@ -101,10 +104,11 @@ bool WebMTracksParser::OnListEnd(int id) {
}
if (id == kWebMIdTrackEntry) {
- if (track_type_ == -1 || track_num_ == -1) {
+ if (track_type_ == -1 || track_num_ == -1 || track_uid_ == -1) {
MEDIA_LOG(log_cb_) << "Missing TrackEntry data for "
<< " TrackType " << track_type_
- << " TrackNum " << track_num_;
+ << " TrackNum " << track_num_
+ << " TrackUID " << track_uid_;
return false;
}
@@ -192,10 +196,11 @@ bool WebMTracksParser::OnListEnd(int id) {
MEDIA_LOG(log_cb_) << "Ignoring text track " << track_num_;
ignored_tracks_.insert(track_num_);
} else {
- TextTrackInfo& text_track_info = text_tracks_[track_num_];
- text_track_info.kind = text_track_kind;
- text_track_info.name = track_name_;
- text_track_info.language = track_language_;
+ std::string track_uid = base::Int64ToString(track_uid_);
+ text_tracks_[track_num_] = TextTrackConfig(text_track_kind,
+ track_name_,
+ track_language_,
+ track_uid);
}
} else {
MEDIA_LOG(log_cb_) << "Unexpected TrackType " << track_type_;
@@ -204,6 +209,7 @@ bool WebMTracksParser::OnListEnd(int id) {
track_type_ = -1;
track_num_ = -1;
+ track_uid_ = -1;
track_name_.clear();
track_language_.clear();
codec_id_ = "";
@@ -228,6 +234,9 @@ bool WebMTracksParser::OnUInt(int id, int64 val) {
case kWebMIdTrackType:
dst = &track_type_;
break;
+ case kWebMIdTrackUID:
+ dst = &track_uid_;
+ break;
case kWebMIdSeekPreRoll:
dst = &seek_preroll_;
break;
diff --git a/chromium/media/webm/webm_tracks_parser.h b/chromium/media/webm/webm_tracks_parser.h
index d3993207a14..d489235d08a 100644
--- a/chromium/media/webm/webm_tracks_parser.h
+++ b/chromium/media/webm/webm_tracks_parser.h
@@ -14,7 +14,7 @@
#include "base/memory/scoped_ptr.h"
#include "media/base/audio_decoder_config.h"
#include "media/base/media_log.h"
-#include "media/base/text_track.h"
+#include "media/base/text_track_config.h"
#include "media/base/video_decoder_config.h"
#include "media/webm/webm_audio_client.h"
#include "media/webm/webm_content_encodings_client.h"
@@ -56,13 +56,7 @@ class MEDIA_EXPORT WebMTracksParser : public WebMParserClient {
return video_decoder_config_;
}
- struct TextTrackInfo {
- TextKind kind;
- std::string name;
- std::string language;
- };
-
- typedef std::map<int64, TextTrackInfo> TextTracks;
+ typedef std::map<int, TextTrackConfig> TextTracks;
const TextTracks& text_tracks() const {
return text_tracks_;
@@ -79,6 +73,7 @@ class MEDIA_EXPORT WebMTracksParser : public WebMParserClient {
int64 track_type_;
int64 track_num_;
+ int64 track_uid_;
std::string track_name_;
std::string track_language_;
std::string codec_id_;
diff --git a/chromium/media/webm/webm_tracks_parser_unittest.cc b/chromium/media/webm/webm_tracks_parser_unittest.cc
index 1ba31117892..ba1e7299f85 100644
--- a/chromium/media/webm/webm_tracks_parser_unittest.cc
+++ b/chromium/media/webm/webm_tracks_parser_unittest.cc
@@ -15,9 +15,6 @@ using ::testing::_;
namespace media {
-static const int kTypeSubtitlesOrCaptions = 0x11;
-static const int kTypeDescriptionsOrMetadata = 0x21;
-
class WebMTracksParserTest : public testing::Test {
public:
WebMTracksParserTest() {}
@@ -40,17 +37,17 @@ static void VerifyTextTrackInfo(const uint8* buffer,
const WebMTracksParser::TextTracks::const_iterator itr = text_tracks.begin();
EXPECT_EQ(itr->first, 1); // track num
- const WebMTracksParser::TextTrackInfo& info = itr->second;
- EXPECT_EQ(info.kind, text_kind);
- EXPECT_TRUE(info.name == name);
- EXPECT_TRUE(info.language == language);
+ const TextTrackConfig& config = itr->second;
+ EXPECT_EQ(config.kind(), text_kind);
+ EXPECT_TRUE(config.label() == name);
+ EXPECT_TRUE(config.language() == language);
}
TEST_F(WebMTracksParserTest, SubtitleNoNameNoLang) {
InSequence s;
TracksBuilder tb;
- tb.AddTrack(1, kWebMTrackTypeSubtitlesOrCaptions,
+ tb.AddTrack(1, kWebMTrackTypeSubtitlesOrCaptions, 1,
kWebMCodecSubtitles, "", "");
const std::vector<uint8> buf = tb.Finish();
@@ -61,7 +58,7 @@ TEST_F(WebMTracksParserTest, SubtitleYesNameNoLang) {
InSequence s;
TracksBuilder tb;
- tb.AddTrack(1, kWebMTrackTypeSubtitlesOrCaptions,
+ tb.AddTrack(1, kWebMTrackTypeSubtitlesOrCaptions, 1,
kWebMCodecSubtitles, "Spock", "");
const std::vector<uint8> buf = tb.Finish();
@@ -72,7 +69,7 @@ TEST_F(WebMTracksParserTest, SubtitleNoNameYesLang) {
InSequence s;
TracksBuilder tb;
- tb.AddTrack(1, kWebMTrackTypeSubtitlesOrCaptions,
+ tb.AddTrack(1, kWebMTrackTypeSubtitlesOrCaptions, 1,
kWebMCodecSubtitles, "", "eng");
const std::vector<uint8> buf = tb.Finish();
@@ -83,7 +80,7 @@ TEST_F(WebMTracksParserTest, SubtitleYesNameYesLang) {
InSequence s;
TracksBuilder tb;
- tb.AddTrack(1, kWebMTrackTypeSubtitlesOrCaptions,
+ tb.AddTrack(1, kWebMTrackTypeSubtitlesOrCaptions, 1,
kWebMCodecSubtitles, "Picard", "fre");
const std::vector<uint8> buf = tb.Finish();
@@ -94,9 +91,9 @@ TEST_F(WebMTracksParserTest, IgnoringTextTracks) {
InSequence s;
TracksBuilder tb;
- tb.AddTrack(1, kWebMTrackTypeSubtitlesOrCaptions,
+ tb.AddTrack(1, kWebMTrackTypeSubtitlesOrCaptions, 1,
kWebMCodecSubtitles, "Subtitles", "fre");
- tb.AddTrack(2, kWebMTrackTypeSubtitlesOrCaptions,
+ tb.AddTrack(2, kWebMTrackTypeSubtitlesOrCaptions, 2,
kWebMCodecSubtitles, "Commentary", "fre");
const std::vector<uint8> buf = tb.Finish();